1#define JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_preamble.h"
3#include "jemalloc/internal/jemalloc_internal_includes.h"
4
5#include "jemalloc/internal/assert.h"
6#include "jemalloc/internal/atomic.h"
7#include "jemalloc/internal/buf_writer.h"
8#include "jemalloc/internal/ctl.h"
9#include "jemalloc/internal/emap.h"
10#include "jemalloc/internal/extent_dss.h"
11#include "jemalloc/internal/extent_mmap.h"
12#include "jemalloc/internal/fxp.h"
13#include "jemalloc/internal/san.h"
14#include "jemalloc/internal/hook.h"
15#include "jemalloc/internal/jemalloc_internal_types.h"
16#include "jemalloc/internal/log.h"
17#include "jemalloc/internal/malloc_io.h"
18#include "jemalloc/internal/mutex.h"
19#include "jemalloc/internal/nstime.h"
20#include "jemalloc/internal/rtree.h"
21#include "jemalloc/internal/safety_check.h"
22#include "jemalloc/internal/sc.h"
23#include "jemalloc/internal/spin.h"
24#include "jemalloc/internal/sz.h"
25#include "jemalloc/internal/ticker.h"
26#include "jemalloc/internal/thread_event.h"
27#include "jemalloc/internal/util.h"
28
29/******************************************************************************/
30/* Data. */
31
32/* Runtime configuration options. */
33const char *je_malloc_conf
34#ifndef _WIN32
35 JEMALLOC_ATTR(weak)
36#endif
37 ;
38/*
39 * The usual rule is that the closer to runtime you are, the higher priority
40 * your configuration settings are (so the jemalloc config options get lower
41 * priority than the per-binary setting, which gets lower priority than the /etc
42 * setting, which gets lower priority than the environment settings).
43 *
44 * But it's a fairly common use case in some testing environments for a user to
 * be able to control the binary, but nothing else (e.g. a performance canary
46 * uses the production OS and environment variables, but can run any binary in
47 * those circumstances). For these use cases, it's handy to have an in-binary
48 * mechanism for overriding environment variable settings, with the idea that if
49 * the results are positive they get promoted to the official settings, and
50 * moved from the binary to the environment variable.
51 *
52 * We don't actually want this to be widespread, so we'll give it a silly name
53 * and not mention it in headers or documentation.
54 */
55const char *je_malloc_conf_2_conf_harder
56#ifndef _WIN32
57 JEMALLOC_ATTR(weak)
58#endif
59 ;
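
/*
 * Illustrative sketch (not part of this file; option values are arbitrary):
 * an application can bake in defaults by providing its own definition of this
 * weak symbol, which is named malloc_conf in a default (unprefixed) build:
 *
 *	const char *malloc_conf = "narenas:4,background_thread:true";
 *
 * Sources processed later (such as the MALLOC_CONF environment variable)
 * still override these settings, per the priority order described above.
 */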
60
61bool opt_abort =
62#ifdef JEMALLOC_DEBUG
63 true
64#else
65 false
66#endif
67 ;
68bool opt_abort_conf =
69#ifdef JEMALLOC_DEBUG
70 true
71#else
72 false
73#endif
74 ;
75/* Intentionally default off, even with debug builds. */
76bool opt_confirm_conf = false;
77const char *opt_junk =
78#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
79 "true"
80#else
81 "false"
82#endif
83 ;
84bool opt_junk_alloc =
85#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
86 true
87#else
88 false
89#endif
90 ;
91bool opt_junk_free =
92#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
93 true
94#else
95 false
96#endif
97 ;
98bool opt_trust_madvise =
99#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
100 false
101#else
102 true
103#endif
104 ;
105
106bool opt_cache_oblivious =
107#ifdef JEMALLOC_CACHE_OBLIVIOUS
108 true
109#else
110 false
111#endif
112 ;
113
114zero_realloc_action_t opt_zero_realloc_action =
115#ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
116 zero_realloc_action_free
117#else
118 zero_realloc_action_alloc
119#endif
120 ;
121
122atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);
123
124const char *zero_realloc_mode_names[] = {
125 "alloc",
126 "free",
127 "abort",
128};
129
130/*
131 * These are the documented values for junk fill debugging facilities -- see the
132 * man page.
133 */
134static const uint8_t junk_alloc_byte = 0xa5;
135static const uint8_t junk_free_byte = 0x5a;
136
137static void default_junk_alloc(void *ptr, size_t usize) {
138 memset(ptr, junk_alloc_byte, usize);
139}
140
141static void default_junk_free(void *ptr, size_t usize) {
142 memset(ptr, junk_free_byte, usize);
143}
144
145void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
146void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
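
/*
 * A minimal sketch of overriding these hooks (a pattern used by unit tests;
 * the names below are hypothetical): a replacement can observe junking in
 * addition to performing the fill.
 *
 *	static void
 *	record_junk_alloc(void *ptr, size_t usize) {
 *		memset(ptr, 0xa5, usize);
 *		junked_bytes += usize;	// hypothetical counter
 *	}
 *	junk_alloc_callback = &record_junk_alloc;
 */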
147
148bool opt_utrace = false;
149bool opt_xmalloc = false;
150bool opt_experimental_infallible_new = false;
151bool opt_zero = false;
152unsigned opt_narenas = 0;
153fxp_t opt_narenas_ratio = FXP_INIT_INT(4);
154
155unsigned ncpus;
156
157unsigned opt_debug_double_free_max_scan =
158 SAFETY_CHECK_DOUBLE_FREE_MAX_SCAN_DEFAULT;
159
160/* Protects arenas initialization. */
161malloc_mutex_t arenas_lock;
162
163/* The global hpa, and whether it's on. */
164bool opt_hpa = false;
165hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
166sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT;
167
168/*
169 * Arenas that are used to service external requests. Not all elements of the
170 * arenas array are necessarily used; arenas are created lazily as needed.
171 *
172 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
173 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
174 * takes some action to create them and allocate from them.
175 *
 * Each element points to an arena_t.
177 */
178JEMALLOC_ALIGNED(CACHELINE)
179atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
180static atomic_u_t narenas_total; /* Use narenas_total_*(). */
181/* Below three are read-only after initialization. */
182static arena_t *a0; /* arenas[0]. */
183unsigned narenas_auto;
184unsigned manual_arena_base;
185
186malloc_init_t malloc_init_state = malloc_init_uninitialized;
187
/*
 * False should be the common case; this starts out true so that the slow path
 * is taken until initialization completes.
 */
189bool malloc_slow = true;
190
/* Bits recorded in malloc_slow_flags when the corresponding option is set. */
192enum {
193 flag_opt_junk_alloc = (1U),
194 flag_opt_junk_free = (1U << 1),
195 flag_opt_zero = (1U << 2),
196 flag_opt_utrace = (1U << 3),
197 flag_opt_xmalloc = (1U << 4)
198};
199static uint8_t malloc_slow_flags;
200
201#ifdef JEMALLOC_THREADED_INIT
202/* Used to let the initializing thread recursively allocate. */
203# define NO_INITIALIZER ((unsigned long)0)
204# define INITIALIZER pthread_self()
205# define IS_INITIALIZER (malloc_initializer == pthread_self())
206static pthread_t malloc_initializer = NO_INITIALIZER;
207#else
208# define NO_INITIALIZER false
209# define INITIALIZER true
210# define IS_INITIALIZER malloc_initializer
211static bool malloc_initializer = NO_INITIALIZER;
212#endif
213
214/* Used to avoid initialization races. */
215#ifdef _WIN32
216#if _WIN32_WINNT >= 0x0600
217static malloc_mutex_t init_lock = SRWLOCK_INIT;
218#else
219static malloc_mutex_t init_lock;
220static bool init_lock_initialized = false;
221
222JEMALLOC_ATTR(constructor)
223static void WINAPI
224_init_init_lock(void) {
225 /*
226 * If another constructor in the same binary is using mallctl to e.g.
227 * set up extent hooks, it may end up running before this one, and
228 * malloc_init_hard will crash trying to lock the uninitialized lock. So
229 * we force an initialization of the lock in malloc_init_hard as well.
	 * We don't worry about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early in
	 * process creation, before any separate thread normally starts doing
	 * anything.
234 */
235 if (!init_lock_initialized) {
236 malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
237 malloc_mutex_rank_exclusive);
238 }
239 init_lock_initialized = true;
240}
241
242#ifdef _MSC_VER
243# pragma section(".CRT$XCU", read)
244JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
245static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
246#endif
247#endif
248#else
249static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
250#endif
251
252typedef struct {
253 void *p; /* Input pointer (as in realloc(p, s)). */
254 size_t s; /* Request size. */
255 void *r; /* Result pointer. */
256} malloc_utrace_t;
257
258#ifdef JEMALLOC_UTRACE
259# define UTRACE(a, b, c) do { \
260 if (unlikely(opt_utrace)) { \
261 int utrace_serrno = errno; \
262 malloc_utrace_t ut; \
263 ut.p = (a); \
264 ut.s = (b); \
265 ut.r = (c); \
266 UTRACE_CALL(&ut, sizeof(ut)); \
267 errno = utrace_serrno; \
268 } \
269} while (0)
270#else
271# define UTRACE(a, b, c)
272#endif
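
/*
 * For reference, the convention at the call sites later in this file is:
 * allocation paths record UTRACE(0, size, result), deallocation paths record
 * UTRACE(ptr, 0, 0), and reallocation records UTRACE(old_ptr, size, new_ptr),
 * matching the malloc_utrace_t fields above.
 */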
273
/* Whether any invalid config options were encountered. */
275static bool had_conf_error = false;
276
277/******************************************************************************/
278/*
279 * Function prototypes for static functions that are referenced prior to
280 * definition.
281 */
282
283static bool malloc_init_hard_a0(void);
284static bool malloc_init_hard(void);
285
286/******************************************************************************/
287/*
288 * Begin miscellaneous support functions.
289 */
290
291JEMALLOC_ALWAYS_INLINE bool
292malloc_init_a0(void) {
293 if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
294 return malloc_init_hard_a0();
295 }
296 return false;
297}
298
299JEMALLOC_ALWAYS_INLINE bool
300malloc_init(void) {
301 if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
302 return true;
303 }
304 return false;
305}
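
/*
 * Sketch of the typical use at an entry point (the real fast paths later in
 * this file add more bookkeeping around this):
 *
 *	if (unlikely(malloc_init())) {
 *		// Initialization failed; fail the request.
 *		return NULL;
 *	}
 */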
306
307/*
308 * The a0*() functions are used instead of i{d,}alloc() in situations that
309 * cannot tolerate TLS variable access.
310 */
311
312static void *
313a0ialloc(size_t size, bool zero, bool is_internal) {
314 if (unlikely(malloc_init_a0())) {
315 return NULL;
316 }
317
318 return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
319 is_internal, arena_get(TSDN_NULL, 0, true), true);
320}
321
322static void
323a0idalloc(void *ptr, bool is_internal) {
324 idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
325}
326
327void *
328a0malloc(size_t size) {
329 return a0ialloc(size, false, true);
330}
331
332void
333a0dalloc(void *ptr) {
334 a0idalloc(ptr, true);
335}
336
337/*
338 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
339 * situations that cannot tolerate TLS variable access (TLS allocation and very
340 * early internal data structure initialization).
341 */
342
343void *
344bootstrap_malloc(size_t size) {
345 if (unlikely(size == 0)) {
346 size = 1;
347 }
348
349 return a0ialloc(size, false, false);
350}
351
352void *
353bootstrap_calloc(size_t num, size_t size) {
354 size_t num_size;
355
356 num_size = num * size;
357 if (unlikely(num_size == 0)) {
358 assert(num == 0 || size == 0);
359 num_size = 1;
360 }
361
362 return a0ialloc(num_size, true, false);
363}
364
365void
366bootstrap_free(void *ptr) {
367 if (unlikely(ptr == NULL)) {
368 return;
369 }
370
371 a0idalloc(ptr, false);
372}
373
374void
375arena_set(unsigned ind, arena_t *arena) {
376 atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
377}
378
379static void
380narenas_total_set(unsigned narenas) {
381 atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
382}
383
384static void
385narenas_total_inc(void) {
386 atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
387}
388
389unsigned
390narenas_total_get(void) {
391 return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
392}
393
394/* Create a new arena and insert it into the arenas array at index ind. */
395static arena_t *
396arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
397 arena_t *arena;
398
399 assert(ind <= narenas_total_get());
400 if (ind >= MALLOCX_ARENA_LIMIT) {
401 return NULL;
402 }
403 if (ind == narenas_total_get()) {
404 narenas_total_inc();
405 }
406
407 /*
408 * Another thread may have already initialized arenas[ind] if it's an
409 * auto arena.
410 */
411 arena = arena_get(tsdn, ind, false);
412 if (arena != NULL) {
413 assert(arena_is_auto(arena));
414 return arena;
415 }
416
417 /* Actually initialize the arena. */
418 arena = arena_new(tsdn, ind, config);
419
420 return arena;
421}
422
423static void
424arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
425 if (ind == 0) {
426 return;
427 }
428 /*
429 * Avoid creating a new background thread just for the huge arena, which
430 * purges eagerly by default.
431 */
432 if (have_background_thread && !arena_is_huge(ind)) {
433 if (background_thread_create(tsdn_tsd(tsdn), ind)) {
434 malloc_printf("<jemalloc>: error in background thread "
435 "creation for arena %u. Abort.\n", ind);
436 abort();
437 }
438 }
439}
440
441arena_t *
442arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
443 arena_t *arena;
444
445 malloc_mutex_lock(tsdn, &arenas_lock);
446 arena = arena_init_locked(tsdn, ind, config);
447 malloc_mutex_unlock(tsdn, &arenas_lock);
448
449 arena_new_create_background_thread(tsdn, ind);
450
451 return arena;
452}
453
454static void
455arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
456 arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
457 arena_nthreads_inc(arena, internal);
458
459 if (internal) {
460 tsd_iarena_set(tsd, arena);
461 } else {
462 tsd_arena_set(tsd, arena);
463 unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
464 ATOMIC_RELAXED);
465 tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
466 for (unsigned i = 0; i < SC_NBINS; i++) {
467 assert(bin_infos[i].n_shards > 0 &&
468 bin_infos[i].n_shards <= BIN_SHARDS_MAX);
469 bins->binshard[i] = shard % bin_infos[i].n_shards;
470 }
471 }
472}
473
474void
475arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) {
476 assert(oldarena != NULL);
477 assert(newarena != NULL);
478
479 arena_nthreads_dec(oldarena, false);
480 arena_nthreads_inc(newarena, false);
481 tsd_arena_set(tsd, newarena);
482
483 if (arena_nthreads_get(oldarena, false) == 0) {
484 /* Purge if the old arena has no associated threads anymore. */
485 arena_decay(tsd_tsdn(tsd), oldarena,
486 /* is_background_thread */ false, /* all */ true);
487 }
488}
489
490static void
491arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
492 arena_t *arena;
493
494 arena = arena_get(tsd_tsdn(tsd), ind, false);
495 arena_nthreads_dec(arena, internal);
496
497 if (internal) {
498 tsd_iarena_set(tsd, NULL);
499 } else {
500 tsd_arena_set(tsd, NULL);
501 }
502}
503
504/* Slow path, called only by arena_choose(). */
505arena_t *
506arena_choose_hard(tsd_t *tsd, bool internal) {
507 arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
508
509 if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
510 unsigned choose = percpu_arena_choose();
511 ret = arena_get(tsd_tsdn(tsd), choose, true);
512 assert(ret != NULL);
513 arena_bind(tsd, arena_ind_get(ret), false);
514 arena_bind(tsd, arena_ind_get(ret), true);
515
516 return ret;
517 }
518
519 if (narenas_auto > 1) {
520 unsigned i, j, choose[2], first_null;
521 bool is_new_arena[2];
522
523 /*
524 * Determine binding for both non-internal and internal
525 * allocation.
526 *
527 * choose[0]: For application allocation.
528 * choose[1]: For internal metadata allocation.
529 */
530
531 for (j = 0; j < 2; j++) {
532 choose[j] = 0;
533 is_new_arena[j] = false;
534 }
535
536 first_null = narenas_auto;
537 malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
538 assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
539 for (i = 1; i < narenas_auto; i++) {
540 if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
541 /*
542 * Choose the first arena that has the lowest
543 * number of threads assigned to it.
544 */
545 for (j = 0; j < 2; j++) {
546 if (arena_nthreads_get(arena_get(
547 tsd_tsdn(tsd), i, false), !!j) <
548 arena_nthreads_get(arena_get(
549 tsd_tsdn(tsd), choose[j], false),
550 !!j)) {
551 choose[j] = i;
552 }
553 }
554 } else if (first_null == narenas_auto) {
555 /*
556 * Record the index of the first uninitialized
557 * arena, in case all extant arenas are in use.
558 *
559 * NB: It is possible for there to be
560 * discontinuities in terms of initialized
561 * versus uninitialized arenas, due to the
562 * "thread.arena" mallctl.
563 */
564 first_null = i;
565 }
566 }
567
568 for (j = 0; j < 2; j++) {
569 if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
570 choose[j], false), !!j) == 0 || first_null ==
571 narenas_auto) {
572 /*
573 * Use an unloaded arena, or the least loaded
574 * arena if all arenas are already initialized.
575 */
576 if (!!j == internal) {
577 ret = arena_get(tsd_tsdn(tsd),
578 choose[j], false);
579 }
580 } else {
581 arena_t *arena;
582
583 /* Initialize a new arena. */
584 choose[j] = first_null;
585 arena = arena_init_locked(tsd_tsdn(tsd),
586 choose[j], &arena_config_default);
587 if (arena == NULL) {
588 malloc_mutex_unlock(tsd_tsdn(tsd),
589 &arenas_lock);
590 return NULL;
591 }
592 is_new_arena[j] = true;
593 if (!!j == internal) {
594 ret = arena;
595 }
596 }
597 arena_bind(tsd, choose[j], !!j);
598 }
599 malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
600
601 for (j = 0; j < 2; j++) {
602 if (is_new_arena[j]) {
603 assert(choose[j] > 0);
604 arena_new_create_background_thread(
605 tsd_tsdn(tsd), choose[j]);
606 }
607 }
608
609 } else {
610 ret = arena_get(tsd_tsdn(tsd), 0, false);
611 arena_bind(tsd, 0, false);
612 arena_bind(tsd, 0, true);
613 }
614
615 return ret;
616}
617
618void
619iarena_cleanup(tsd_t *tsd) {
620 arena_t *iarena;
621
622 iarena = tsd_iarena_get(tsd);
623 if (iarena != NULL) {
624 arena_unbind(tsd, arena_ind_get(iarena), true);
625 }
626}
627
628void
629arena_cleanup(tsd_t *tsd) {
630 arena_t *arena;
631
632 arena = tsd_arena_get(tsd);
633 if (arena != NULL) {
634 arena_unbind(tsd, arena_ind_get(arena), false);
635 }
636}
637
638static void
639stats_print_atexit(void) {
640 if (config_stats) {
641 tsdn_t *tsdn;
642 unsigned narenas, i;
643
644 tsdn = tsdn_fetch();
645
646 /*
647 * Merge stats from extant threads. This is racy, since
648 * individual threads do not lock when recording tcache stats
649 * events. As a consequence, the final stats may be slightly
650 * out of date by the time they are reported, if other threads
651 * continue to allocate.
652 */
653 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
654 arena_t *arena = arena_get(tsdn, i, false);
655 if (arena != NULL) {
656 tcache_slow_t *tcache_slow;
657
658 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
659 ql_foreach(tcache_slow, &arena->tcache_ql,
660 link) {
661 tcache_stats_merge(tsdn,
662 tcache_slow->tcache, arena);
663 }
664 malloc_mutex_unlock(tsdn,
665 &arena->tcache_ql_mtx);
666 }
667 }
668 }
669 je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
670}
671
672/*
673 * Ensure that we don't hold any locks upon entry to or exit from allocator
674 * code (in a "broad" sense that doesn't count a reentrant allocation as an
675 * entrance or exit).
676 */
677JEMALLOC_ALWAYS_INLINE void
678check_entry_exit_locking(tsdn_t *tsdn) {
679 if (!config_debug) {
680 return;
681 }
682 if (tsdn_null(tsdn)) {
683 return;
684 }
685 tsd_t *tsd = tsdn_tsd(tsdn);
686 /*
687 * It's possible we hold locks at entry/exit if we're in a nested
688 * allocation.
689 */
690 int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
691 if (reentrancy_level != 0) {
692 return;
693 }
694 witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
695}
696
697/*
698 * End miscellaneous support functions.
699 */
700/******************************************************************************/
701/*
702 * Begin initialization functions.
703 */
704
705static char *
706jemalloc_secure_getenv(const char *name) {
707#ifdef JEMALLOC_HAVE_SECURE_GETENV
708 return secure_getenv(name);
709#else
710# ifdef JEMALLOC_HAVE_ISSETUGID
711 if (issetugid() != 0) {
712 return NULL;
713 }
714# endif
715 return getenv(name);
716#endif
717}
718
719static unsigned
720malloc_ncpus(void) {
721 long result;
722
723#ifdef _WIN32
724 SYSTEM_INFO si;
725 GetSystemInfo(&si);
726 result = si.dwNumberOfProcessors;
727#elif defined(CPU_COUNT)
728 /*
729 * glibc >= 2.6 has the CPU_COUNT macro.
730 *
731 * glibc's sysconf() uses isspace(). glibc allocates for the first time
732 * *before* setting up the isspace tables. Therefore we need a
733 * different method to get the number of CPUs.
734 *
735 * The getaffinity approach is also preferred when only a subset of CPUs
736 * is available, to avoid using more arenas than necessary.
737 */
738 {
739# if defined(__FreeBSD__) || defined(__DragonFly__)
740 cpuset_t set;
741# else
742 cpu_set_t set;
743# endif
744# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
745 sched_getaffinity(0, sizeof(set), &set);
746# else
747 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
748# endif
749 result = CPU_COUNT(&set);
750 }
751#else
752 result = sysconf(_SC_NPROCESSORS_ONLN);
753#endif
754 return ((result == -1) ? 1 : (unsigned)result);
755}
756
/*
 * Ensure that the number of CPUs is deterministic, i.e. that it is the same
 * according to all of:
 * - sched_getaffinity()
 * - _SC_NPROCESSORS_ONLN
 * - _SC_NPROCESSORS_CONF
 * Otherwise, subtle problems are possible when percpu arenas are in use.
 */
764static bool
765malloc_cpu_count_is_deterministic()
766{
767#ifdef _WIN32
768 return true;
769#else
770 long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN);
771 long cpu_conf = sysconf(_SC_NPROCESSORS_CONF);
772 if (cpu_onln != cpu_conf) {
773 return false;
774 }
775# if defined(CPU_COUNT)
776# if defined(__FreeBSD__) || defined(__DragonFly__)
777 cpuset_t set;
778# else
779 cpu_set_t set;
780# endif /* __FreeBSD__ */
781# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
782 sched_getaffinity(0, sizeof(set), &set);
783# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */
784 pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
785# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */
786 long cpu_affinity = CPU_COUNT(&set);
787 if (cpu_affinity != cpu_conf) {
788 return false;
789 }
790# endif /* CPU_COUNT */
791 return true;
792#endif
793}
794
795static void
796init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
797 size_t opts_len = strlen(dest);
798 assert(opts_len <= stats_print_tot_num_options);
799
800 for (size_t i = 0; i < vlen; i++) {
801 switch (v[i]) {
802#define OPTION(o, v, d, s) case o: break;
803 STATS_PRINT_OPTIONS
804#undef OPTION
805 default: continue;
806 }
807
808 if (strchr(dest, v[i]) != NULL) {
809 /* Ignore repeated. */
810 continue;
811 }
812
813 dest[opts_len++] = v[i];
814 dest[opts_len] = '\0';
815 assert(opts_len <= stats_print_tot_num_options);
816 }
817 assert(opts_len == strlen(dest));
818}
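
/*
 * Worked example (a sketch; the option characters come from
 * STATS_PRINT_OPTIONS, and 'J' selects JSON output): parsing keeps each
 * recognized character once and silently drops repeats and unknown
 * characters.
 *
 *	char dest[stats_print_tot_num_options + 1] = "";
 *	init_opt_stats_opts("JJa?", 4, dest);
 *	// dest is now "Ja" (assuming 'J' and 'a' are valid option characters).
 */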
819
/* Reads the next size-range:value entry in a multi-sized option. */
821static bool
822malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
823 size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
824 const char *cur = *slab_size_segment_cur;
825 char *end;
826 uintmax_t um;
827
828 set_errno(0);
829
830 /* First number, then '-' */
831 um = malloc_strtoumax(cur, &end, 0);
832 if (get_errno() != 0 || *end != '-') {
833 return true;
834 }
835 *slab_start = (size_t)um;
836 cur = end + 1;
837
838 /* Second number, then ':' */
839 um = malloc_strtoumax(cur, &end, 0);
840 if (get_errno() != 0 || *end != ':') {
841 return true;
842 }
843 *slab_end = (size_t)um;
844 cur = end + 1;
845
846 /* Last number */
847 um = malloc_strtoumax(cur, &end, 0);
848 if (get_errno() != 0) {
849 return true;
850 }
851 *new_size = (size_t)um;
852
853 /* Consume the separator if there is one. */
854 if (*end == '|') {
855 end++;
856 }
857
858 *vlen_left -= end - *slab_size_segment_cur;
859 *slab_size_segment_cur = end;
860
861 return false;
862}
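
/*
 * Illustrative sketch: options such as slab_sizes and bin_shards encode one
 * or more "start-end:value" entries separated by '|', e.g.
 * "128-512:2|1024-2048:1". A caller iterates roughly as follows (v/vlen are
 * the option value and its length, as in the conf parser below; error
 * handling elided):
 *
 *	const char *cur = v;
 *	size_t left = vlen, lo, hi, val;
 *	while (left > 0 &&
 *	    !malloc_conf_multi_sizes_next(&cur, &left, &lo, &hi, &val)) {
 *		// Apply val to the size range [lo, hi].
 *	}
 */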
863
864static bool
865malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
866 char const **v_p, size_t *vlen_p) {
867 bool accept;
868 const char *opts = *opts_p;
869
870 *k_p = opts;
871
872 for (accept = false; !accept;) {
873 switch (*opts) {
874 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
875 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
876 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
877 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
878 case 'Y': case 'Z':
879 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
880 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
881 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
882 case 's': case 't': case 'u': case 'v': case 'w': case 'x':
883 case 'y': case 'z':
884 case '0': case '1': case '2': case '3': case '4': case '5':
885 case '6': case '7': case '8': case '9':
886 case '_':
887 opts++;
888 break;
889 case ':':
890 opts++;
891 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
892 *v_p = opts;
893 accept = true;
894 break;
895 case '\0':
896 if (opts != *opts_p) {
897 malloc_write("<jemalloc>: Conf string ends "
898 "with key\n");
899 had_conf_error = true;
900 }
901 return true;
902 default:
903 malloc_write("<jemalloc>: Malformed conf string\n");
904 had_conf_error = true;
905 return true;
906 }
907 }
908
909 for (accept = false; !accept;) {
910 switch (*opts) {
911 case ',':
912 opts++;
913 /*
914 * Look ahead one character here, because the next time
915 * this function is called, it will assume that end of
916 * input has been cleanly reached if no input remains,
917 * but we have optimistically already consumed the
918 * comma if one exists.
919 */
920 if (*opts == '\0') {
921 malloc_write("<jemalloc>: Conf string ends "
922 "with comma\n");
923 had_conf_error = true;
924 }
925 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
926 accept = true;
927 break;
928 case '\0':
929 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
930 accept = true;
931 break;
932 default:
933 opts++;
934 break;
935 }
936 }
937
938 *opts_p = opts;
939 return false;
940}
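
/*
 * Worked example (values arbitrary): with opts = "abort:true,narenas:3", the
 * first call yields k/klen = "abort"/5 and v/vlen = "true"/4 (neither
 * NUL-terminated) and advances past the comma; the second yields
 * "narenas"/"3"; if called again it would see '\0' immediately and return
 * true without flagging an error. The driving loop below is simply:
 *
 *	while (*opts != '\0' &&
 *	    !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) {
 *		// handle one key/value pair
 *	}
 */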
941
942static void
943malloc_abort_invalid_conf(void) {
944 assert(opt_abort_conf);
945 malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
946 "value (see above).\n");
947 abort();
948}
949
950static void
951malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
952 size_t vlen) {
953 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
954 (int)vlen, v);
955 /* If abort_conf is set, error out after processing all options. */
956 const char *experimental = "experimental_";
957 if (strncmp(k, experimental, strlen(experimental)) == 0) {
958 /* However, tolerate experimental features. */
959 return;
960 }
961 had_conf_error = true;
962}
963
964static void
965malloc_slow_flag_init(void) {
966 /*
967 * Combine the runtime options into malloc_slow for fast path. Called
968 * after processing all the options.
969 */
970 malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
971 | (opt_junk_free ? flag_opt_junk_free : 0)
972 | (opt_zero ? flag_opt_zero : 0)
973 | (opt_utrace ? flag_opt_utrace : 0)
974 | (opt_xmalloc ? flag_opt_xmalloc : 0);
975
976 malloc_slow = (malloc_slow_flags != 0);
977}
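
/*
 * For example, with junk:true and utrace:true in effect, malloc_slow_flags
 * becomes (flag_opt_junk_alloc | flag_opt_junk_free | flag_opt_utrace), so
 * malloc_slow is true and requests take the slow paths.
 */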
978
979/* Number of sources for initializing malloc_conf */
980#define MALLOC_CONF_NSOURCES 5
981
982static const char *
983obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
984 if (config_debug) {
985 static unsigned read_source = 0;
986 /*
987 * Each source should only be read once, to minimize # of
988 * syscalls on init.
989 */
990 assert(read_source++ == which_source);
991 }
992 assert(which_source < MALLOC_CONF_NSOURCES);
993
994 const char *ret;
995 switch (which_source) {
996 case 0:
997 ret = config_malloc_conf;
998 break;
999 case 1:
1000 if (je_malloc_conf != NULL) {
1001 /* Use options that were compiled into the program. */
1002 ret = je_malloc_conf;
1003 } else {
1004 /* No configuration specified. */
1005 ret = NULL;
1006 }
1007 break;
1008 case 2: {
1009 ssize_t linklen = 0;
1010#ifndef _WIN32
1011 int saved_errno = errno;
1012 const char *linkname =
1013# ifdef JEMALLOC_PREFIX
1014 "/etc/"JEMALLOC_PREFIX"malloc.conf"
1015# else
1016 "/etc/malloc.conf"
1017# endif
1018 ;
1019
		/*
		 * Try to use the name of the file referenced by the
		 * "/etc/malloc.conf" symbolic link as the options string.
		 */
1024#ifndef JEMALLOC_READLINKAT
1025 linklen = readlink(linkname, buf, PATH_MAX);
1026#else
1027 linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
1028#endif
1029 if (linklen == -1) {
1030 /* No configuration specified. */
1031 linklen = 0;
1032 /* Restore errno. */
1033 set_errno(saved_errno);
1034 }
1035#endif
1036 buf[linklen] = '\0';
1037 ret = buf;
1038 break;
1039 } case 3: {
1040 const char *envname =
1041#ifdef JEMALLOC_PREFIX
1042 JEMALLOC_CPREFIX"MALLOC_CONF"
1043#else
1044 "MALLOC_CONF"
1045#endif
1046 ;
1047
1048 if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
1049 /*
1050 * Do nothing; opts is already initialized to the value
1051 * of the MALLOC_CONF environment variable.
1052 */
1053 } else {
1054 /* No configuration specified. */
1055 ret = NULL;
1056 }
1057 break;
1058 } case 4: {
1059 ret = je_malloc_conf_2_conf_harder;
1060 break;
1061 } default:
1062 not_reached();
1063 ret = NULL;
1064 }
1065 return ret;
1066}
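
/*
 * Sketch of how source 2 gets populated (done by an administrator, not by
 * this library; option values are arbitrary): the options are taken from the
 * name readlink() reports for the symlink, not from file contents, so the
 * setup is effectively:
 *
 *	symlink("narenas:2,dirty_decay_ms:5000", "/etc/malloc.conf");
 */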
1067
1068static void
1069malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
1070 bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
1071 char buf[PATH_MAX + 1]) {
1072 static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
1073 "string specified via --with-malloc-conf",
1074 "string pointed to by the global variable malloc_conf",
1075 "\"name\" of the file referenced by the symbolic link named "
1076 "/etc/malloc.conf",
1077 "value of the environment variable MALLOC_CONF",
1078 "string pointed to by the global variable "
1079 "malloc_conf_2_conf_harder",
1080 };
1081 unsigned i;
1082 const char *opts, *k, *v;
1083 size_t klen, vlen;
1084
1085 for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
1086 /* Get runtime configuration. */
1087 if (initial_call) {
1088 opts_cache[i] = obtain_malloc_conf(i, buf);
1089 }
1090 opts = opts_cache[i];
1091 if (!initial_call && opt_confirm_conf) {
1092 malloc_printf(
1093 "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
1094 i + 1, opts_explain[i], opts != NULL ? opts : "");
1095 }
1096 if (opts == NULL) {
1097 continue;
1098 }
1099
1100 while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
1101 &vlen)) {
1102
1103#define CONF_ERROR(msg, k, klen, v, vlen) \
1104 if (!initial_call) { \
1105 malloc_conf_error( \
1106 msg, k, klen, v, vlen); \
1107 cur_opt_valid = false; \
1108 }
1109#define CONF_CONTINUE { \
1110 if (!initial_call && opt_confirm_conf \
1111 && cur_opt_valid) { \
1112 malloc_printf("<jemalloc>: -- " \
1113 "Set conf value: %.*s:%.*s" \
1114 "\n", (int)klen, k, \
1115 (int)vlen, v); \
1116 } \
1117 continue; \
1118 }
1119#define CONF_MATCH(n) \
1120 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
1121#define CONF_MATCH_VALUE(n) \
1122 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
1123#define CONF_HANDLE_BOOL(o, n) \
1124 if (CONF_MATCH(n)) { \
1125 if (CONF_MATCH_VALUE("true")) { \
1126 o = true; \
1127 } else if (CONF_MATCH_VALUE("false")) { \
1128 o = false; \
1129 } else { \
1130 CONF_ERROR("Invalid conf value",\
1131 k, klen, v, vlen); \
1132 } \
1133 CONF_CONTINUE; \
1134 }
1135 /*
1136 * One of the CONF_MIN macros below expands, in one of the use points,
1137 * to "unsigned integer < 0", which is always false, triggering the
1138 * GCC -Wtype-limits warning, which we disable here and re-enable below.
1139 */
1140 JEMALLOC_DIAGNOSTIC_PUSH
1141 JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
1142
1143#define CONF_DONT_CHECK_MIN(um, min) false
1144#define CONF_CHECK_MIN(um, min) ((um) < (min))
1145#define CONF_DONT_CHECK_MAX(um, max) false
1146#define CONF_CHECK_MAX(um, max) ((um) > (max))
1147
1148#define CONF_VALUE_READ(max_t, result) \
1149 char *end; \
1150 set_errno(0); \
1151 result = (max_t)malloc_strtoumax(v, &end, 0);
1152#define CONF_VALUE_READ_FAIL() \
1153 (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen)
1154
1155#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
1156 if (CONF_MATCH(n)) { \
1157 max_t mv; \
1158 CONF_VALUE_READ(max_t, mv) \
1159 if (CONF_VALUE_READ_FAIL()) { \
1160 CONF_ERROR("Invalid conf value",\
1161 k, klen, v, vlen); \
1162 } else if (clip) { \
1163 if (check_min(mv, (t)(min))) { \
1164 o = (t)(min); \
1165 } else if ( \
1166 check_max(mv, (t)(max))) { \
1167 o = (t)(max); \
1168 } else { \
1169 o = (t)mv; \
1170 } \
1171 } else { \
1172 if (check_min(mv, (t)(min)) || \
1173 check_max(mv, (t)(max))) { \
1174 CONF_ERROR( \
1175 "Out-of-range " \
1176 "conf value", \
1177 k, klen, v, vlen); \
1178 } else { \
1179 o = (t)mv; \
1180 } \
1181 } \
1182 CONF_CONTINUE; \
1183 }
1184#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
1185 CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
1186 check_max, clip)
1187#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
1188 CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
1189 check_max, clip)
1190
1191#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
1192 clip) \
1193 CONF_HANDLE_T_U(unsigned, o, n, min, max, \
1194 check_min, check_max, clip)
1195#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
1196 CONF_HANDLE_T_U(size_t, o, n, min, max, \
1197 check_min, check_max, clip)
1198#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
1199 CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
1200 check_min, check_max, clip)
1201#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\
1202 CONF_HANDLE_T_U(uint64_t, o, n, min, max, \
1203 check_min, check_max, clip)
1204#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1205 CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
1206 CONF_CHECK_MIN, CONF_CHECK_MAX, false)
1207#define CONF_HANDLE_CHAR_P(o, n, d) \
1208 if (CONF_MATCH(n)) { \
1209 size_t cpylen = (vlen <= \
1210 sizeof(o)-1) ? vlen : \
1211 sizeof(o)-1; \
1212 strncpy(o, v, cpylen); \
1213 o[cpylen] = '\0'; \
1214 CONF_CONTINUE; \
1215 }
1216
1217 bool cur_opt_valid = true;
1218
1219 CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
1220 if (initial_call) {
1221 continue;
1222 }
1223
1224 CONF_HANDLE_BOOL(opt_abort, "abort")
1225 CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1226 CONF_HANDLE_BOOL(opt_cache_oblivious, "cache_oblivious")
1227 CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
1228 if (strncmp("metadata_thp", k, klen) == 0) {
1229 int m;
1230 bool match = false;
1231 for (m = 0; m < metadata_thp_mode_limit; m++) {
1232 if (strncmp(metadata_thp_mode_names[m],
1233 v, vlen) == 0) {
1234 opt_metadata_thp = m;
1235 match = true;
1236 break;
1237 }
1238 }
1239 if (!match) {
1240 CONF_ERROR("Invalid conf value",
1241 k, klen, v, vlen);
1242 }
1243 CONF_CONTINUE;
1244 }
1245 CONF_HANDLE_BOOL(opt_retain, "retain")
1246 if (strncmp("dss", k, klen) == 0) {
1247 int m;
1248 bool match = false;
1249 for (m = 0; m < dss_prec_limit; m++) {
1250 if (strncmp(dss_prec_names[m], v, vlen)
1251 == 0) {
1252 if (extent_dss_prec_set(m)) {
1253 CONF_ERROR(
1254 "Error setting dss",
1255 k, klen, v, vlen);
1256 } else {
1257 opt_dss =
1258 dss_prec_names[m];
1259 match = true;
1260 break;
1261 }
1262 }
1263 }
1264 if (!match) {
1265 CONF_ERROR("Invalid conf value",
1266 k, klen, v, vlen);
1267 }
1268 CONF_CONTINUE;
1269 }
1270 if (CONF_MATCH("narenas")) {
1271 if (CONF_MATCH_VALUE("default")) {
1272 opt_narenas = 0;
1273 CONF_CONTINUE;
1274 } else {
1275 CONF_HANDLE_UNSIGNED(opt_narenas,
1276 "narenas", 1, UINT_MAX,
1277 CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
1278 /* clip */ false)
1279 }
1280 }
1281 if (CONF_MATCH("narenas_ratio")) {
1282 char *end;
1283 bool err = fxp_parse(&opt_narenas_ratio, v,
1284 &end);
1285 if (err || (size_t)(end - v) != vlen) {
1286 CONF_ERROR("Invalid conf value",
1287 k, klen, v, vlen);
1288 }
1289 CONF_CONTINUE;
1290 }
1291 if (CONF_MATCH("bin_shards")) {
1292 const char *bin_shards_segment_cur = v;
1293 size_t vlen_left = vlen;
1294 do {
1295 size_t size_start;
1296 size_t size_end;
1297 size_t nshards;
1298 bool err = malloc_conf_multi_sizes_next(
1299 &bin_shards_segment_cur, &vlen_left,
1300 &size_start, &size_end, &nshards);
1301 if (err || bin_update_shard_size(
1302 bin_shard_sizes, size_start,
1303 size_end, nshards)) {
1304 CONF_ERROR(
1305 "Invalid settings for "
1306 "bin_shards", k, klen, v,
1307 vlen);
1308 break;
1309 }
1310 } while (vlen_left > 0);
1311 CONF_CONTINUE;
1312 }
1313 CONF_HANDLE_INT64_T(opt_mutex_max_spin,
1314 "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
1315 CONF_DONT_CHECK_MAX, false);
1316 CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1317 "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1318 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1319 SSIZE_MAX);
1320 CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1321 "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1322 QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1323 SSIZE_MAX);
1324 CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1325 if (CONF_MATCH("stats_print_opts")) {
1326 init_opt_stats_opts(v, vlen,
1327 opt_stats_print_opts);
1328 CONF_CONTINUE;
1329 }
1330 CONF_HANDLE_INT64_T(opt_stats_interval,
1331 "stats_interval", -1, INT64_MAX,
1332 CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
1333 if (CONF_MATCH("stats_interval_opts")) {
1334 init_opt_stats_opts(v, vlen,
1335 opt_stats_interval_opts);
1336 CONF_CONTINUE;
1337 }
1338 if (config_fill) {
1339 if (CONF_MATCH("junk")) {
1340 if (CONF_MATCH_VALUE("true")) {
1341 opt_junk = "true";
1342 opt_junk_alloc = opt_junk_free =
1343 true;
1344 } else if (CONF_MATCH_VALUE("false")) {
1345 opt_junk = "false";
1346 opt_junk_alloc = opt_junk_free =
1347 false;
1348 } else if (CONF_MATCH_VALUE("alloc")) {
1349 opt_junk = "alloc";
1350 opt_junk_alloc = true;
1351 opt_junk_free = false;
1352 } else if (CONF_MATCH_VALUE("free")) {
1353 opt_junk = "free";
1354 opt_junk_alloc = false;
1355 opt_junk_free = true;
1356 } else {
1357 CONF_ERROR(
1358 "Invalid conf value",
1359 k, klen, v, vlen);
1360 }
1361 CONF_CONTINUE;
1362 }
1363 CONF_HANDLE_BOOL(opt_zero, "zero")
1364 }
1365 if (config_utrace) {
1366 CONF_HANDLE_BOOL(opt_utrace, "utrace")
1367 }
1368 if (config_xmalloc) {
1369 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1370 }
1371 if (config_enable_cxx) {
1372 CONF_HANDLE_BOOL(
1373 opt_experimental_infallible_new,
1374 "experimental_infallible_new")
1375 }
1376
1377 CONF_HANDLE_BOOL(opt_tcache, "tcache")
1378 CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max",
1379 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN,
1380 CONF_CHECK_MAX, /* clip */ true)
1381 if (CONF_MATCH("lg_tcache_max")) {
1382 size_t m;
1383 CONF_VALUE_READ(size_t, m)
1384 if (CONF_VALUE_READ_FAIL()) {
1385 CONF_ERROR("Invalid conf value",
1386 k, klen, v, vlen);
1387 } else {
1388 /* clip if necessary */
1389 if (m > TCACHE_LG_MAXCLASS_LIMIT) {
1390 m = TCACHE_LG_MAXCLASS_LIMIT;
1391 }
1392 opt_tcache_max = (size_t)1 << m;
1393 }
1394 CONF_CONTINUE;
1395 }
1396 /*
1397 * Anyone trying to set a value outside -16 to 16 is
1398 * deeply confused.
1399 */
1400 CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul,
1401 "lg_tcache_nslots_mul", -16, 16)
1402 /* Ditto with values past 2048. */
1403 CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min,
1404 "tcache_nslots_small_min", 1, 2048,
1405 CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
1406 CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max,
1407 "tcache_nslots_small_max", 1, 2048,
1408 CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
1409 CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large,
1410 "tcache_nslots_large", 1, 2048,
1411 CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
1412 CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes,
1413 "tcache_gc_incr_bytes", 1024, SIZE_T_MAX,
1414 CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
1415 /* clip */ true)
1416 CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes,
1417 "tcache_gc_delay_bytes", 0, SIZE_T_MAX,
1418 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
1419 /* clip */ false)
1420 CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div,
1421 "lg_tcache_flush_small_div", 1, 16,
1422 CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
1423 CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div,
1424 "lg_tcache_flush_large_div", 1, 16,
1425 CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
1426 CONF_HANDLE_UNSIGNED(opt_debug_double_free_max_scan,
1427 "debug_double_free_max_scan", 0, UINT_MAX,
1428 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
1429 /* clip */ false)
1430
1431 /*
1432 * The runtime option of oversize_threshold remains
1433 * undocumented. It may be tweaked in the next major
1434 * release (6.0). The default value 8M is rather
1435 * conservative / safe. Tuning it further down may
1436 * improve fragmentation a bit more, but may also cause
1437 * contention on the huge arena.
1438 */
1439 CONF_HANDLE_SIZE_T(opt_oversize_threshold,
1440 "oversize_threshold", 0, SC_LARGE_MAXCLASS,
1441 CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
1442 CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1443 "lg_extent_max_active_fit", 0,
1444 (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
1445 CONF_CHECK_MAX, false)
1446
1447 if (strncmp("percpu_arena", k, klen) == 0) {
1448 bool match = false;
1449 for (int m = percpu_arena_mode_names_base; m <
1450 percpu_arena_mode_names_limit; m++) {
1451 if (strncmp(percpu_arena_mode_names[m],
1452 v, vlen) == 0) {
1453 if (!have_percpu_arena) {
1454 CONF_ERROR(
1455 "No getcpu support",
1456 k, klen, v, vlen);
1457 }
1458 opt_percpu_arena = m;
1459 match = true;
1460 break;
1461 }
1462 }
1463 if (!match) {
1464 CONF_ERROR("Invalid conf value",
1465 k, klen, v, vlen);
1466 }
1467 CONF_CONTINUE;
1468 }
1469 CONF_HANDLE_BOOL(opt_background_thread,
1470 "background_thread");
1471 CONF_HANDLE_SIZE_T(opt_max_background_threads,
1472 "max_background_threads", 1,
1473 opt_max_background_threads,
1474 CONF_CHECK_MIN, CONF_CHECK_MAX,
1475 true);
1476 CONF_HANDLE_BOOL(opt_hpa, "hpa")
1477 CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
1478 "hpa_slab_max_alloc", PAGE, HUGEPAGE,
1479 CONF_CHECK_MIN, CONF_CHECK_MAX, true);
1480
1481 /*
1482 * Accept either a ratio-based or an exact hugification
1483 * threshold.
1484 */
1485 CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold,
1486 "hpa_hugification_threshold", PAGE, HUGEPAGE,
1487 CONF_CHECK_MIN, CONF_CHECK_MAX, true);
1488 if (CONF_MATCH("hpa_hugification_threshold_ratio")) {
1489 fxp_t ratio;
1490 char *end;
1491 bool err = fxp_parse(&ratio, v,
1492 &end);
1493 if (err || (size_t)(end - v) != vlen
1494 || ratio > FXP_INIT_INT(1)) {
1495 CONF_ERROR("Invalid conf value",
1496 k, klen, v, vlen);
1497 } else {
1498 opt_hpa_opts.hugification_threshold =
1499 fxp_mul_frac(HUGEPAGE, ratio);
1500 }
1501 CONF_CONTINUE;
1502 }
1503
1504 CONF_HANDLE_UINT64_T(
1505 opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms",
1506 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
1507 false);
1508
1509 CONF_HANDLE_UINT64_T(
1510 opt_hpa_opts.min_purge_interval_ms,
1511 "hpa_min_purge_interval_ms", 0, 0,
1512 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
1513
1514 if (CONF_MATCH("hpa_dirty_mult")) {
1515 if (CONF_MATCH_VALUE("-1")) {
1516 opt_hpa_opts.dirty_mult = (fxp_t)-1;
1517 CONF_CONTINUE;
1518 }
1519 fxp_t ratio;
1520 char *end;
1521 bool err = fxp_parse(&ratio, v,
1522 &end);
1523 if (err || (size_t)(end - v) != vlen) {
1524 CONF_ERROR("Invalid conf value",
1525 k, klen, v, vlen);
1526 } else {
1527 opt_hpa_opts.dirty_mult = ratio;
1528 }
1529 CONF_CONTINUE;
1530 }
1531
1532 CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards,
1533 "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN,
1534 CONF_DONT_CHECK_MAX, true);
1535 CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc,
1536 "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN,
1537 CONF_DONT_CHECK_MAX, true);
1538 CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
1539 "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN,
1540 CONF_DONT_CHECK_MAX, true);
1541 CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush,
1542 "hpa_sec_bytes_after_flush", PAGE, 0,
1543 CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
1544 CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
1545 "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES,
1546 CONF_CHECK_MIN, CONF_CHECK_MAX, true);
1547
1548 if (CONF_MATCH("slab_sizes")) {
1549 if (CONF_MATCH_VALUE("default")) {
1550 sc_data_init(sc_data);
1551 CONF_CONTINUE;
1552 }
1553 bool err;
1554 const char *slab_size_segment_cur = v;
1555 size_t vlen_left = vlen;
1556 do {
1557 size_t slab_start;
1558 size_t slab_end;
1559 size_t pgs;
1560 err = malloc_conf_multi_sizes_next(
1561 &slab_size_segment_cur,
1562 &vlen_left, &slab_start, &slab_end,
1563 &pgs);
1564 if (!err) {
1565 sc_data_update_slab_size(
1566 sc_data, slab_start,
1567 slab_end, (int)pgs);
1568 } else {
1569 CONF_ERROR("Invalid settings "
1570 "for slab_sizes",
1571 k, klen, v, vlen);
1572 }
1573 } while (!err && vlen_left > 0);
1574 CONF_CONTINUE;
1575 }
1576 if (config_prof) {
1577 CONF_HANDLE_BOOL(opt_prof, "prof")
1578 CONF_HANDLE_CHAR_P(opt_prof_prefix,
1579 "prof_prefix", "jeprof")
1580 CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1581 CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1582 "prof_thread_active_init")
1583 CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1584 "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1585 - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
1586 true)
1587 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1588 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1589 "lg_prof_interval", -1,
1590 (sizeof(uint64_t) << 3) - 1)
1591 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1592 CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1593 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1594 CONF_HANDLE_BOOL(opt_prof_leak_error,
1595 "prof_leak_error")
1596 CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
1597 CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max,
1598 "prof_recent_alloc_max", -1, SSIZE_MAX)
1599 CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats")
1600 CONF_HANDLE_BOOL(opt_prof_sys_thread_name,
1601 "prof_sys_thread_name")
1602 if (CONF_MATCH("prof_time_resolution")) {
1603 if (CONF_MATCH_VALUE("default")) {
1604 opt_prof_time_res =
1605 prof_time_res_default;
1606 } else if (CONF_MATCH_VALUE("high")) {
1607 if (!config_high_res_timer) {
1608 CONF_ERROR(
1609 "No high resolution"
1610 " timer support",
1611 k, klen, v, vlen);
1612 } else {
1613 opt_prof_time_res =
1614 prof_time_res_high;
1615 }
1616 } else {
1617 CONF_ERROR("Invalid conf value",
1618 k, klen, v, vlen);
1619 }
1620 CONF_CONTINUE;
1621 }
1622 /*
1623 * Undocumented. When set to false, don't
1624 * correct for an unbiasing bug in jeprof
1625 * attribution. This can be handy if you want
1626 * to get consistent numbers from your binary
1627 * across different jemalloc versions, even if
1628 * those numbers are incorrect. The default is
1629 * true.
1630 */
1631 CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias")
1632 }
1633 if (config_log) {
1634 if (CONF_MATCH("log")) {
					size_t cpylen = (
					    vlen <= sizeof(log_var_names) - 1 ?
					    vlen : sizeof(log_var_names) - 1);
1638 strncpy(log_var_names, v, cpylen);
1639 log_var_names[cpylen] = '\0';
1640 CONF_CONTINUE;
1641 }
1642 }
1643 if (CONF_MATCH("thp")) {
1644 bool match = false;
1645 for (int m = 0; m < thp_mode_names_limit; m++) {
1646 if (strncmp(thp_mode_names[m],v, vlen)
1647 == 0) {
1648 if (!have_madvise_huge && !have_memcntl) {
1649 CONF_ERROR(
1650 "No THP support",
1651 k, klen, v, vlen);
1652 }
1653 opt_thp = m;
1654 match = true;
1655 break;
1656 }
1657 }
1658 if (!match) {
1659 CONF_ERROR("Invalid conf value",
1660 k, klen, v, vlen);
1661 }
1662 CONF_CONTINUE;
1663 }
1664 if (CONF_MATCH("zero_realloc")) {
1665 if (CONF_MATCH_VALUE("alloc")) {
1666 opt_zero_realloc_action
1667 = zero_realloc_action_alloc;
1668 } else if (CONF_MATCH_VALUE("free")) {
1669 opt_zero_realloc_action
1670 = zero_realloc_action_free;
1671 } else if (CONF_MATCH_VALUE("abort")) {
1672 opt_zero_realloc_action
1673 = zero_realloc_action_abort;
1674 } else {
1675 CONF_ERROR("Invalid conf value",
1676 k, klen, v, vlen);
1677 }
1678 CONF_CONTINUE;
1679 }
1680 if (config_uaf_detection &&
1681 CONF_MATCH("lg_san_uaf_align")) {
1682 ssize_t a;
1683 CONF_VALUE_READ(ssize_t, a)
1684 if (CONF_VALUE_READ_FAIL() || a < -1) {
1685 CONF_ERROR("Invalid conf value",
1686 k, klen, v, vlen);
1687 }
1688 if (a == -1) {
1689 opt_lg_san_uaf_align = -1;
1690 CONF_CONTINUE;
1691 }
1692
1693 /* clip if necessary */
1694 ssize_t max_allowed = (sizeof(size_t) << 3) - 1;
1695 ssize_t min_allowed = LG_PAGE;
1696 if (a > max_allowed) {
1697 a = max_allowed;
1698 } else if (a < min_allowed) {
1699 a = min_allowed;
1700 }
1701
1702 opt_lg_san_uaf_align = a;
1703 CONF_CONTINUE;
1704 }
1705
1706 CONF_HANDLE_SIZE_T(opt_san_guard_small,
1707 "san_guard_small", 0, SIZE_T_MAX,
1708 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
1709 CONF_HANDLE_SIZE_T(opt_san_guard_large,
1710 "san_guard_large", 0, SIZE_T_MAX,
1711 CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
1712
1713 CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
1714#undef CONF_ERROR
1715#undef CONF_CONTINUE
1716#undef CONF_MATCH
1717#undef CONF_MATCH_VALUE
1718#undef CONF_HANDLE_BOOL
1719#undef CONF_DONT_CHECK_MIN
1720#undef CONF_CHECK_MIN
1721#undef CONF_DONT_CHECK_MAX
1722#undef CONF_CHECK_MAX
1723#undef CONF_HANDLE_T
1724#undef CONF_HANDLE_T_U
1725#undef CONF_HANDLE_T_SIGNED
1726#undef CONF_HANDLE_UNSIGNED
1727#undef CONF_HANDLE_SIZE_T
1728#undef CONF_HANDLE_SSIZE_T
1729#undef CONF_HANDLE_CHAR_P
1730 /* Re-enable diagnostic "-Wtype-limits" */
1731 JEMALLOC_DIAGNOSTIC_POP
1732 }
1733 if (opt_abort_conf && had_conf_error) {
1734 malloc_abort_invalid_conf();
1735 }
1736 }
1737 atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1738}
1739
1740static bool
1741malloc_conf_init_check_deps(void) {
1742 if (opt_prof_leak_error && !opt_prof_final) {
1743 malloc_printf("<jemalloc>: prof_leak_error is set w/o "
1744 "prof_final.\n");
1745 return true;
1746 }
	/* Zero the option so stats output shows it is disabled when !debug. */
1748 if (!config_debug) {
1749 opt_debug_double_free_max_scan = 0;
1750 }
1751
1752 return false;
1753}
1754
1755static void
1756malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
1757 const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL,
1758 NULL};
1759 char buf[PATH_MAX + 1];
1760
	/* The first call only sets the confirm_conf option and opts_cache. */
1762 malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
1763 malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
1764 NULL);
1765 if (malloc_conf_init_check_deps()) {
1766 /* check_deps does warning msg only; abort below if needed. */
1767 if (opt_abort_conf) {
1768 malloc_abort_invalid_conf();
1769 }
1770 }
1771}
1772
1773#undef MALLOC_CONF_NSOURCES
1774
1775static bool
1776malloc_init_hard_needed(void) {
1777 if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1778 malloc_init_recursible)) {
1779 /*
1780 * Another thread initialized the allocator before this one
1781 * acquired init_lock, or this thread is the initializing
1782 * thread, and it is recursively allocating.
1783 */
1784 return false;
1785 }
1786#ifdef JEMALLOC_THREADED_INIT
1787 if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1788 /* Busy-wait until the initializing thread completes. */
1789 spin_t spinner = SPIN_INITIALIZER;
1790 do {
1791 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1792 spin_adaptive(&spinner);
1793 malloc_mutex_lock(TSDN_NULL, &init_lock);
1794 } while (!malloc_initialized());
1795 return false;
1796 }
1797#endif
1798 return true;
1799}
1800
1801static bool
1802malloc_init_hard_a0_locked() {
1803 malloc_initializer = INITIALIZER;
1804
1805 JEMALLOC_DIAGNOSTIC_PUSH
1806 JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
1807 sc_data_t sc_data = {0};
1808 JEMALLOC_DIAGNOSTIC_POP
1809
1810 /*
1811 * Ordering here is somewhat tricky; we need sc_boot() first, since that
1812 * determines what the size classes will be, and then
1813 * malloc_conf_init(), since any slab size tweaking will need to be done
1814 * before sz_boot and bin_info_boot, which assume that the values they
1815 * read out of sc_data_global are final.
1816 */
1817 sc_boot(&sc_data);
1818 unsigned bin_shard_sizes[SC_NBINS];
1819 bin_shard_sizes_boot(bin_shard_sizes);
1820 /*
1821 * prof_boot0 only initializes opt_prof_prefix. We need to do it before
1822 * we parse malloc_conf options, in case malloc_conf parsing overwrites
1823 * it.
1824 */
1825 if (config_prof) {
1826 prof_boot0();
1827 }
1828 malloc_conf_init(&sc_data, bin_shard_sizes);
1829 san_init(opt_lg_san_uaf_align);
1830 sz_boot(&sc_data, opt_cache_oblivious);
1831 bin_info_boot(&sc_data, bin_shard_sizes);
1832
1833 if (opt_stats_print) {
1834 /* Print statistics at exit. */
1835 if (atexit(stats_print_atexit) != 0) {
1836 malloc_write("<jemalloc>: Error in atexit()\n");
1837 if (opt_abort) {
1838 abort();
1839 }
1840 }
1841 }
1842
1843 if (stats_boot()) {
1844 return true;
1845 }
1846 if (pages_boot()) {
1847 return true;
1848 }
1849 if (base_boot(TSDN_NULL)) {
1850 return true;
1851 }
1852 /* emap_global is static, hence zeroed. */
1853 if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
1854 return true;
1855 }
1856 if (extent_boot()) {
1857 return true;
1858 }
1859 if (ctl_boot()) {
1860 return true;
1861 }
1862 if (config_prof) {
1863 prof_boot1();
1864 }
1865 if (opt_hpa && !hpa_supported()) {
1866 malloc_printf("<jemalloc>: HPA not supported in the current "
1867 "configuration; %s.",
1868 opt_abort_conf ? "aborting" : "disabling");
1869 if (opt_abort_conf) {
1870 malloc_abort_invalid_conf();
1871 } else {
1872 opt_hpa = false;
1873 }
1874 }
1875 if (arena_boot(&sc_data, b0get(), opt_hpa)) {
1876 return true;
1877 }
1878 if (tcache_boot(TSDN_NULL, b0get())) {
1879 return true;
1880 }
1881 if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1882 malloc_mutex_rank_exclusive)) {
1883 return true;
1884 }
1885 hook_boot();
1886 /*
1887 * Create enough scaffolding to allow recursive allocation in
1888 * malloc_ncpus().
1889 */
1890 narenas_auto = 1;
1891 manual_arena_base = narenas_auto + 1;
1892 memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1893 /*
1894 * Initialize one arena here. The rest are lazily created in
1895 * arena_choose_hard().
1896 */
1897 if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) {
1898 return true;
1899 }
1900 a0 = arena_get(TSDN_NULL, 0, false);
1901
1902 if (opt_hpa && !hpa_supported()) {
1903 malloc_printf("<jemalloc>: HPA not supported in the current "
1904 "configuration; %s.",
1905 opt_abort_conf ? "aborting" : "disabling");
1906 if (opt_abort_conf) {
1907 malloc_abort_invalid_conf();
1908 } else {
1909 opt_hpa = false;
1910 }
1911 } else if (opt_hpa) {
1912 hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
1913 hpa_shard_opts.deferral_allowed = background_thread_enabled();
1914 if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
1915 &hpa_shard_opts, &opt_hpa_sec_opts)) {
1916 return true;
1917 }
1918 }
1919
1920 malloc_init_state = malloc_init_a0_initialized;
1921
1922 return false;
1923}
1924
1925static bool
1926malloc_init_hard_a0(void) {
1927 bool ret;
1928
1929 malloc_mutex_lock(TSDN_NULL, &init_lock);
1930 ret = malloc_init_hard_a0_locked();
1931 malloc_mutex_unlock(TSDN_NULL, &init_lock);
1932 return ret;
1933}
1934
1935/* Initialize data structures which may trigger recursive allocation. */
1936static bool
1937malloc_init_hard_recursible(void) {
1938 malloc_init_state = malloc_init_recursible;
1939
1940 ncpus = malloc_ncpus();
1941 if (opt_percpu_arena != percpu_arena_disabled) {
1942 bool cpu_count_is_deterministic =
1943 malloc_cpu_count_is_deterministic();
1944 if (!cpu_count_is_deterministic) {
			/*
			 * If the number of CPUs is not deterministic and
			 * narenas was not specified, disable percpu arenas,
			 * since CPU IDs may not be detected properly.
			 */
1950 if (opt_narenas == 0) {
1951 opt_percpu_arena = percpu_arena_disabled;
1952 malloc_write("<jemalloc>: Number of CPUs "
1953 "detected is not deterministic. Per-CPU "
1954 "arena disabled.\n");
1955 if (opt_abort_conf) {
1956 malloc_abort_invalid_conf();
1957 }
1958 if (opt_abort) {
1959 abort();
1960 }
1961 }
1962 }
1963 }
1964
1965#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1966 && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1967 !defined(__native_client__))
1968 /* LinuxThreads' pthread_atfork() allocates. */
1969 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1970 jemalloc_postfork_child) != 0) {
1971 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1972 if (opt_abort) {
1973 abort();
1974 }
1975 return true;
1976 }
1977#endif
1978
1979 if (background_thread_boot0()) {
1980 return true;
1981 }
1982
1983 return false;
1984}
1985
1986static unsigned
1987malloc_narenas_default(void) {
1988 assert(ncpus > 0);
1989 /*
1990 * For SMP systems, create more than one arena per CPU by
1991 * default.
1992 */
1993 if (ncpus > 1) {
1994 fxp_t fxp_ncpus = FXP_INIT_INT(ncpus);
1995 fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio);
1996 uint32_t int_goal = fxp_round_nearest(goal);
1997 if (int_goal == 0) {
1998 return 1;
1999 }
2000 return int_goal;
2001 } else {
2002 return 1;
2003 }
2004}
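
/*
 * Worked example (values arbitrary): with ncpus == 8 and the default
 * narenas_ratio of 4, goal is the fixed-point representation of 32, so
 * fxp_round_nearest(goal) yields 32 arenas; a ratio of 0.5 would yield 4.
 */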
2005
2006static percpu_arena_mode_t
2007percpu_arena_as_initialized(percpu_arena_mode_t mode) {
2008 assert(!malloc_initialized());
2009 assert(mode <= percpu_arena_disabled);
2010
2011 if (mode != percpu_arena_disabled) {
2012 mode += percpu_arena_mode_enabled_base;
2013 }
2014
2015 return mode;
2016}
2017
2018static bool
2019malloc_init_narenas(void) {
2020 assert(ncpus > 0);
2021
2022 if (opt_percpu_arena != percpu_arena_disabled) {
2023 if (!have_percpu_arena || malloc_getcpu() < 0) {
2024 opt_percpu_arena = percpu_arena_disabled;
2025 malloc_printf("<jemalloc>: perCPU arena getcpu() not "
2026 "available. Setting narenas to %u.\n", opt_narenas ?
2027 opt_narenas : malloc_narenas_default());
2028 if (opt_abort) {
2029 abort();
2030 }
2031 } else {
2032 if (ncpus >= MALLOCX_ARENA_LIMIT) {
				malloc_printf("<jemalloc>: narenas w/ percpu "
				    "arena beyond limit (%d)\n", ncpus);
2035 if (opt_abort) {
2036 abort();
2037 }
2038 return true;
2039 }
2040 /* NB: opt_percpu_arena isn't fully initialized yet. */
2041 if (percpu_arena_as_initialized(opt_percpu_arena) ==
2042 per_phycpu_arena && ncpus % 2 != 0) {
2043 malloc_printf("<jemalloc>: invalid "
2044 "configuration -- per physical CPU arena "
2045 "with odd number (%u) of CPUs (no hyper "
2046 "threading?).\n", ncpus);
2047 if (opt_abort)
2048 abort();
2049 }
2050 unsigned n = percpu_arena_ind_limit(
2051 percpu_arena_as_initialized(opt_percpu_arena));
2052 if (opt_narenas < n) {
2053 /*
			 * If narenas is specified with percpu_arena
			 * enabled, the actual narenas is set to the
			 * greater of the two.  percpu_arena_choose will
			 * be free to use any of the arenas based on CPU
			 * id.  This is conservative (at a small cost)
			 * but ensures correctness.
2060 *
2061 * If for some reason the ncpus determined at
2062 * boot is not the actual number (e.g. because
2063 * of affinity setting from numactl), reserving
2064 * narenas this way provides a workaround for
2065 * percpu_arena.
2066 */
2067 opt_narenas = n;
2068 }
2069 }
2070 }
2071 if (opt_narenas == 0) {
2072 opt_narenas = malloc_narenas_default();
2073 }
2074 assert(opt_narenas > 0);
2075
2076 narenas_auto = opt_narenas;
2077 /*
2078 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
2079 */
2080 if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
2081 narenas_auto = MALLOCX_ARENA_LIMIT - 1;
2082 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
2083 narenas_auto);
2084 }
2085 narenas_total_set(narenas_auto);
2086 if (arena_init_huge()) {
2087 narenas_total_inc();
2088 }
2089 manual_arena_base = narenas_total_get();
2090
2091 return false;
2092}
2093
2094static void
2095malloc_init_percpu(void) {
2096 opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
2097}
2098
2099static bool
2100malloc_init_hard_finish(void) {
2101 if (malloc_mutex_boot()) {
2102 return true;
2103 }
2104
2105 malloc_init_state = malloc_init_initialized;
2106 malloc_slow_flag_init();
2107
2108 return false;
2109}
2110
2111static void
2112malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
2113 malloc_mutex_assert_owner(tsdn, &init_lock);
2114 malloc_mutex_unlock(tsdn, &init_lock);
2115 if (reentrancy_set) {
2116 assert(!tsdn_null(tsdn));
2117 tsd_t *tsd = tsdn_tsd(tsdn);
2118 assert(tsd_reentrancy_level_get(tsd) > 0);
2119 post_reentrancy(tsd);
2120 }
2121}
2122
2123static bool
2124malloc_init_hard(void) {
2125 tsd_t *tsd;
2126
2127#if defined(_WIN32) && _WIN32_WINNT < 0x0600
2128 _init_init_lock();
2129#endif
2130 malloc_mutex_lock(TSDN_NULL, &init_lock);
2131
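/*
 * Every early return below must drop init_lock (and undo the reentrancy bump,
 * once it has been set); UNLOCK_RETURN routes that through
 * malloc_init_hard_cleanup() so the error paths stay short.
 */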
2132#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
2133 malloc_init_hard_cleanup(tsdn, reentrancy); \
2134 return ret;
2135
2136 if (!malloc_init_hard_needed()) {
2137 UNLOCK_RETURN(TSDN_NULL, false, false)
2138 }
2139
2140 if (malloc_init_state != malloc_init_a0_initialized &&
2141 malloc_init_hard_a0_locked()) {
2142 UNLOCK_RETURN(TSDN_NULL, true, false)
2143 }
2144
2145 malloc_mutex_unlock(TSDN_NULL, &init_lock);
2146 /* Recursive allocation relies on functional tsd. */
2147 tsd = malloc_tsd_boot0();
2148 if (tsd == NULL) {
2149 return true;
2150 }
2151 if (malloc_init_hard_recursible()) {
2152 return true;
2153 }
2154
2155 malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
2156 /* Set reentrancy level to 1 during init. */
2157 pre_reentrancy(tsd, NULL);
2158 /* Initialize narenas before prof_boot2 (for allocation). */
2159 if (malloc_init_narenas()
2160 || background_thread_boot1(tsd_tsdn(tsd), b0get())) {
2161 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
2162 }
2163 if (config_prof && prof_boot2(tsd, b0get())) {
2164 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
2165 }
2166
2167 malloc_init_percpu();
2168
2169 if (malloc_init_hard_finish()) {
2170 UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
2171 }
2172 post_reentrancy(tsd);
2173 malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
2174
2175 witness_assert_lockless(witness_tsd_tsdn(
2176 tsd_witness_tsdp_get_unsafe(tsd)));
2177 malloc_tsd_boot1();
2178 /* Update TSD after tsd_boot1. */
2179 tsd = tsd_fetch();
2180 if (opt_background_thread) {
2181 assert(have_background_thread);
2182 /*
2183 * Need to finish init & unlock first before creating background
2184 * threads (pthread_create depends on malloc). ctl_init (which
2185 * sets isthreaded) needs to be called without holding any lock.
2186 */
2187 background_thread_ctl_init(tsd_tsdn(tsd));
2188 if (background_thread_create(tsd, 0)) {
2189 return true;
2190 }
2191 }
2192#undef UNLOCK_RETURN
2193 return false;
2194}
2195
2196/*
2197 * End initialization functions.
2198 */
2199/******************************************************************************/
2200/*
2201 * Begin allocation-path internal functions and data structures.
2202 */
2203
2204/*
2205 * Settings determined by the documented behavior of the allocation functions.
2206 */
2207typedef struct static_opts_s static_opts_t;
2208struct static_opts_s {
2209 /* Whether or not allocation size may overflow. */
2210 bool may_overflow;
2211
2212 /*
2213 * Whether or not allocations (with alignment) of size 0 should be
2214 * treated as size 1.
2215 */
2216 bool bump_empty_aligned_alloc;
2217 /*
2218 * Whether to assert that allocations are not of size 0 (after any
2219 * bumping).
2220 */
2221 bool assert_nonempty_alloc;
2222
2223 /*
2224 * Whether or not to modify the 'result' argument to malloc in case of
2225 * error.
2226 */
2227 bool null_out_result_on_error;
2228 /* Whether to set errno when we encounter an error condition. */
2229 bool set_errno_on_error;
2230
2231 /*
2232 * The minimum valid alignment for functions requesting aligned storage.
2233 */
2234 size_t min_alignment;
2235
2236 /* The error string to use if we oom. */
2237 const char *oom_string;
2238 /* The error string to use if the passed-in alignment is invalid. */
2239 const char *invalid_alignment_string;
2240
2241 /*
2242 * False if we're configured to skip some time-consuming operations.
2243 *
2244 * This isn't really a malloc "behavior", but it acts as a useful
2245 * summary of several other static (or at least, static after program
2246 * initialization) options.
2247 */
2248 bool slow;
	/*
	 * Whether to report the computed usable size back via dopts->usize.
	 */
2252 bool usize;
2253};
2254
2255JEMALLOC_ALWAYS_INLINE void
2256static_opts_init(static_opts_t *static_opts) {
2257 static_opts->may_overflow = false;
2258 static_opts->bump_empty_aligned_alloc = false;
2259 static_opts->assert_nonempty_alloc = false;
2260 static_opts->null_out_result_on_error = false;
2261 static_opts->set_errno_on_error = false;
2262 static_opts->min_alignment = 0;
2263 static_opts->oom_string = "";
2264 static_opts->invalid_alignment_string = "";
2265 static_opts->slow = false;
2266 static_opts->usize = false;
2267}
2268
2269/*
2270 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
2271 * should have one constant here per magic value there. Note however that the
2272 * representations need not be related.
2273 */
2274#define TCACHE_IND_NONE ((unsigned)-1)
2275#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
2276#define ARENA_IND_AUTOMATIC ((unsigned)-1)
2277
2278typedef struct dynamic_opts_s dynamic_opts_t;
2279struct dynamic_opts_s {
2280 void **result;
2281 size_t usize;
2282 size_t num_items;
2283 size_t item_size;
2284 size_t alignment;
2285 bool zero;
2286 unsigned tcache_ind;
2287 unsigned arena_ind;
2288};
2289
2290JEMALLOC_ALWAYS_INLINE void
2291dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
2292 dynamic_opts->result = NULL;
2293 dynamic_opts->usize = 0;
2294 dynamic_opts->num_items = 0;
2295 dynamic_opts->item_size = 0;
2296 dynamic_opts->alignment = 0;
2297 dynamic_opts->zero = false;
2298 dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
2299 dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
2300}
2301
2302/*
 * The ind parameter is optional, and is only checked and filled if alignment
 * == 0; returns true if the resulting usize is out of range.
2305 */
2306JEMALLOC_ALWAYS_INLINE bool
2307aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
2308 bool bump_empty_aligned_alloc) {
2309 assert(usize != NULL);
2310 if (alignment == 0) {
2311 if (ind != NULL) {
2312 *ind = sz_size2index(size);
2313 if (unlikely(*ind >= SC_NSIZES)) {
2314 return true;
2315 }
2316 *usize = sz_index2size(*ind);
2317 assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
2318 return false;
2319 }
2320 *usize = sz_s2u(size);
2321 } else {
2322 if (bump_empty_aligned_alloc && unlikely(size == 0)) {
2323 size = 1;
2324 }
2325 *usize = sz_sa2u(size, alignment);
2326 }
2327 if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) {
2328 return true;
2329 }
2330 return false;
2331}
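/*
 * In both branches above, the returned *usize is the usable size jemalloc
 * would actually hand back: either the plain size-class round-up (sz_s2u() /
 * the size2index fast path), or the alignment-aware round-up (sz_sa2u()).
 */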
2332
2333JEMALLOC_ALWAYS_INLINE bool
2334zero_get(bool guarantee, bool slow) {
2335 if (config_fill && slow && unlikely(opt_zero)) {
2336 return true;
2337 } else {
2338 return guarantee;
2339 }
2340}
2341
2342JEMALLOC_ALWAYS_INLINE tcache_t *
2343tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
2344 tcache_t *tcache;
2345 if (tcache_ind == TCACHE_IND_AUTOMATIC) {
2346 if (likely(!slow)) {
2347 /* Getting tcache ptr unconditionally. */
2348 tcache = tsd_tcachep_get(tsd);
2349 assert(tcache == tcache_get(tsd));
2350 } else if (is_alloc ||
2351 likely(tsd_reentrancy_level_get(tsd) == 0)) {
2352 tcache = tcache_get(tsd);
2353 } else {
2354 tcache = NULL;
2355 }
2356 } else {
2357 /*
2358 * Should not specify tcache on deallocation path when being
2359 * reentrant.
2360 */
2361 assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
2362 tsd_state_nocleanup(tsd));
2363 if (tcache_ind == TCACHE_IND_NONE) {
2364 tcache = NULL;
2365 } else {
2366 tcache = tcaches_get(tsd, tcache_ind);
2367 }
2368 }
2369 return tcache;
2370}
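/*
 * Put differently: on the fast path the thread-local tcache pointer is used
 * unconditionally; on the slow path, reentrant deallocations get a NULL
 * tcache so that no tcache state is touched while reentrant.
 */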
2371
2372/* Return true if a manual arena is specified and arena_get() OOMs. */
2373JEMALLOC_ALWAYS_INLINE bool
2374arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
2375 if (arena_ind == ARENA_IND_AUTOMATIC) {
2376 /*
2377 * In case of automatic arena management, we defer arena
2378 * computation until as late as we can, hoping to fill the
2379 * allocation out of the tcache.
2380 */
2381 *arena_p = NULL;
2382 } else {
2383 *arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true);
2384 if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) {
2385 return true;
2386 }
2387 }
2388 return false;
2389}
2390
2391/* ind is ignored if dopts->alignment > 0. */
2392JEMALLOC_ALWAYS_INLINE void *
2393imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
2394 size_t size, size_t usize, szind_t ind) {
2395 /* Fill in the tcache. */
2396 tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
2397 sopts->slow, /* is_alloc */ true);
2398
2399 /* Fill in the arena. */
2400 arena_t *arena;
2401 if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) {
2402 return NULL;
2403 }
2404
2405 if (unlikely(dopts->alignment != 0)) {
2406 return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
2407 dopts->zero, tcache, arena);
2408 }
2409
2410 return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
2411 arena, sopts->slow);
2412}
2413
2414JEMALLOC_ALWAYS_INLINE void *
2415imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
2416 size_t usize, szind_t ind) {
2417 void *ret;
2418
2419 /*
	 * For small allocations, sampling bumps the usize up to
	 * SC_LARGE_MINCLASS; in that case we allocate from the ind_large
	 * bucket.
2422 */
2423 szind_t ind_large;
2424 size_t bumped_usize = usize;
2425
2426 dopts->alignment = prof_sample_align(dopts->alignment);
2427 if (usize <= SC_SMALL_MAXCLASS) {
2428 assert(((dopts->alignment == 0) ?
2429 sz_s2u(SC_LARGE_MINCLASS) :
2430 sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
2431 == SC_LARGE_MINCLASS);
2432 ind_large = sz_size2index(SC_LARGE_MINCLASS);
2433 bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
2434 ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
2435 bumped_usize, ind_large);
2436 if (unlikely(ret == NULL)) {
2437 return NULL;
2438 }
2439 arena_prof_promote(tsd_tsdn(tsd), ret, usize);
2440 } else {
2441 ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
2442 }
2443 assert(prof_sample_aligned(ret));
2444
2445 return ret;
2446}
2447
2448/*
2449 * Returns true if the allocation will overflow, and false otherwise. Sets
2450 * *size to the product either way.
2451 */
2452JEMALLOC_ALWAYS_INLINE bool
2453compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
2454 size_t *size) {
2455 /*
2456 * This function is just num_items * item_size, except that we may have
2457 * to check for overflow.
2458 */
2459
2460 if (!may_overflow) {
2461 assert(dopts->num_items == 1);
2462 *size = dopts->item_size;
2463 return false;
2464 }
2465
2466 /* A size_t with its high-half bits all set to 1. */
2467 static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
2468
2469 *size = dopts->item_size * dopts->num_items;
2470
2471 if (unlikely(*size == 0)) {
2472 return (dopts->num_items != 0 && dopts->item_size != 0);
2473 }
2474
2475 /*
2476 * We got a non-zero size, but we don't know if we overflowed to get
2477 * there. To avoid having to do a divide, we'll be clever and note that
2478 * if both A and B can be represented in N/2 bits, then their product
2479 * can be represented in N bits (without the possibility of overflow).
2480 */
2481 if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
2482 return false;
2483 }
2484 if (likely(*size / dopts->item_size == dopts->num_items)) {
2485 return false;
2486 }
2487 return true;
2488}
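/*
 * Concretely, on a platform where size_t is 64 bits, high_bits is
 * 0xffffffff00000000: if neither num_items nor item_size has any of those
 * bits set, both fit in 32 bits and their product cannot overflow, so the
 * division check is skipped on the common path.
 */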
2489
2490JEMALLOC_ALWAYS_INLINE int
2491imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
2492 /* Where the actual allocated memory will live. */
2493 void *allocation = NULL;
2494 /* Filled in by compute_size_with_overflow below. */
2495 size_t size = 0;
2496 /*
	 * The zero initialization for ind is actually a dead store, in that
	 * its value is reset before any branch on its value is taken.
	 * Sometimes though, it's convenient to pass it as an argument before
	 * this point.  To avoid undefined behavior then, we initialize it
	 * with a dummy store.
2501 */
2502 szind_t ind = 0;
2503 /* usize will always be properly initialized. */
2504 size_t usize;
2505
2506 /* Reentrancy is only checked on slow path. */
2507 int8_t reentrancy_level;
2508
2509 /* Compute the amount of memory the user wants. */
2510 if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
2511 &size))) {
2512 goto label_oom;
2513 }
2514
2515 if (unlikely(dopts->alignment < sopts->min_alignment
2516 || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
2517 goto label_invalid_alignment;
2518 }
2519
2520 /* This is the beginning of the "core" algorithm. */
2521 dopts->zero = zero_get(dopts->zero, sopts->slow);
2522 if (aligned_usize_get(size, dopts->alignment, &usize, &ind,
2523 sopts->bump_empty_aligned_alloc)) {
2524 goto label_oom;
2525 }
2526 dopts->usize = usize;
2527 /* Validate the user input. */
2528 if (sopts->assert_nonempty_alloc) {
		assert(size != 0);
2530 }
2531
2532 check_entry_exit_locking(tsd_tsdn(tsd));
2533
2534 /*
2535 * If we need to handle reentrancy, we can do it out of a
2536 * known-initialized arena (i.e. arena 0).
2537 */
2538 reentrancy_level = tsd_reentrancy_level_get(tsd);
2539 if (sopts->slow && unlikely(reentrancy_level > 0)) {
2540 /*
2541 * We should never specify particular arenas or tcaches from
2542 * within our internal allocations.
2543 */
2544 assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
2545 dopts->tcache_ind == TCACHE_IND_NONE);
2546 assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
2547 dopts->tcache_ind = TCACHE_IND_NONE;
2548 /* We know that arena 0 has already been initialized. */
2549 dopts->arena_ind = 0;
2550 }
2551
2552 /*
2553 * If dopts->alignment > 0, then ind is still 0, but usize was computed
2554 * in the previous if statement. Down the positive alignment path,
2555 * imalloc_no_sample and imalloc_sample will ignore ind.
2556 */
2557
2558 /* If profiling is on, get our profiling context. */
2559 if (config_prof && opt_prof) {
2560 bool prof_active = prof_active_get_unlocked();
2561 bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
2562 prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active,
2563 sample_event);
2564
2565 emap_alloc_ctx_t alloc_ctx;
2566 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
2567 alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
2568 allocation = imalloc_no_sample(
2569 sopts, dopts, tsd, usize, usize, ind);
2570 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
2571 allocation = imalloc_sample(
2572 sopts, dopts, tsd, usize, ind);
2573 alloc_ctx.slab = false;
2574 } else {
2575 allocation = NULL;
2576 }
2577
2578 if (unlikely(allocation == NULL)) {
2579 prof_alloc_rollback(tsd, tctx);
2580 goto label_oom;
2581 }
2582 prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
2583 } else {
2584 assert(!opt_prof);
2585 allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
2586 ind);
2587 if (unlikely(allocation == NULL)) {
2588 goto label_oom;
2589 }
2590 }
2591
2592 /*
2593 * Allocation has been done at this point. We still have some
2594 * post-allocation work to do though.
2595 */
2596
2597 thread_alloc_event(tsd, usize);
2598
2599 assert(dopts->alignment == 0
2600 || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
2601
2602 assert(usize == isalloc(tsd_tsdn(tsd), allocation));
2603
2604 if (config_fill && sopts->slow && !dopts->zero
2605 && unlikely(opt_junk_alloc)) {
2606 junk_alloc_callback(allocation, usize);
2607 }
2608
2609 if (sopts->slow) {
2610 UTRACE(0, size, allocation);
2611 }
2612
2613 /* Success! */
2614 check_entry_exit_locking(tsd_tsdn(tsd));
2615 *dopts->result = allocation;
2616 return 0;
2617
2618label_oom:
2619 if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
2620 malloc_write(sopts->oom_string);
2621 abort();
2622 }
2623
2624 if (sopts->slow) {
2625 UTRACE(NULL, size, NULL);
2626 }
2627
2628 check_entry_exit_locking(tsd_tsdn(tsd));
2629
2630 if (sopts->set_errno_on_error) {
2631 set_errno(ENOMEM);
2632 }
2633
2634 if (sopts->null_out_result_on_error) {
2635 *dopts->result = NULL;
2636 }
2637
2638 return ENOMEM;
2639
2640 /*
2641 * This label is only jumped to by one goto; we move it out of line
	 * anyway to avoid obscuring the non-error paths, and for symmetry with
2643 * the oom case.
2644 */
2645label_invalid_alignment:
2646 if (config_xmalloc && unlikely(opt_xmalloc)) {
2647 malloc_write(sopts->invalid_alignment_string);
2648 abort();
2649 }
2650
2651 if (sopts->set_errno_on_error) {
2652 set_errno(EINVAL);
2653 }
2654
2655 if (sopts->slow) {
2656 UTRACE(NULL, size, NULL);
2657 }
2658
2659 check_entry_exit_locking(tsd_tsdn(tsd));
2660
2661 if (sopts->null_out_result_on_error) {
2662 *dopts->result = NULL;
2663 }
2664
2665 return EINVAL;
2666}
2667
2668JEMALLOC_ALWAYS_INLINE bool
2669imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
2670 if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2671 if (config_xmalloc && unlikely(opt_xmalloc)) {
2672 malloc_write(sopts->oom_string);
2673 abort();
2674 }
2675 UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2676 set_errno(ENOMEM);
2677 *dopts->result = NULL;
2678
2679 return false;
2680 }
2681
2682 return true;
2683}
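/*
 * Note the return convention of imalloc_init_check() above: true means
 * initialization is usable and the caller may proceed; false means
 * malloc_init() failed and errno / *dopts->result have already been set up
 * for an ENOMEM return.
 */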
2684
2685/* Returns the errno-style error code of the allocation. */
2686JEMALLOC_ALWAYS_INLINE int
2687imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
2688 if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
2689 return ENOMEM;
2690 }
2691
2692 /* We always need the tsd. Let's grab it right away. */
2693 tsd_t *tsd = tsd_fetch();
2694 assert(tsd);
2695 if (likely(tsd_fast(tsd))) {
2696 /* Fast and common path. */
2697 tsd_assert_fast(tsd);
2698 sopts->slow = false;
2699 return imalloc_body(sopts, dopts, tsd);
2700 } else {
2701 if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
2702 return ENOMEM;
2703 }
2704
2705 sopts->slow = true;
2706 return imalloc_body(sopts, dopts, tsd);
2707 }
2708}
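/*
 * The asymmetry above is deliberate: when tsd_get_allocates(), the
 * initialization check must happen before tsd_fetch(); otherwise it can be
 * deferred to the slow branch, leaving the fast path with a single
 * tsd_fast() test.
 */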
2709
2710JEMALLOC_NOINLINE
2711void *
2712malloc_default(size_t size) {
2713 void *ret;
2714 static_opts_t sopts;
2715 dynamic_opts_t dopts;
2716
2717 /*
	 * This variant has a logging hook on exit but not on entry.  It's
	 * called only by je_malloc, below, which emits the entry log for us
	 * (and, if it calls us, does so only via tail call).
2721 */
2722
2723 static_opts_init(&sopts);
2724 dynamic_opts_init(&dopts);
2725
2726 sopts.null_out_result_on_error = true;
2727 sopts.set_errno_on_error = true;
2728 sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2729
2730 dopts.result = &ret;
2731 dopts.num_items = 1;
2732 dopts.item_size = size;
2733
2734 imalloc(&sopts, &dopts);
2735 /*
2736 * Note that this branch gets optimized away -- it immediately follows
2737 * the check on tsd_fast that sets sopts.slow.
2738 */
2739 if (sopts.slow) {
2740 uintptr_t args[3] = {size};
2741 hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
2742 }
2743
2744 LOG("core.malloc.exit", "result: %p", ret);
2745
2746 return ret;
2747}
2748
2749/******************************************************************************/
2750/*
2751 * Begin malloc(3)-compatible functions.
2752 */
2753
2754JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2755void JEMALLOC_NOTHROW *
2756JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2757je_malloc(size_t size) {
2758 return imalloc_fastpath(size, &malloc_default);
2759}
2760
2761JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2762JEMALLOC_ATTR(nonnull(1))
2763je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2764 int ret;
2765 static_opts_t sopts;
2766 dynamic_opts_t dopts;
2767
2768 LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2769 "size: %zu", memptr, alignment, size);
2770
2771 static_opts_init(&sopts);
2772 dynamic_opts_init(&dopts);
2773
2774 sopts.bump_empty_aligned_alloc = true;
2775 sopts.min_alignment = sizeof(void *);
2776 sopts.oom_string =
2777 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2778 sopts.invalid_alignment_string =
2779 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2780
2781 dopts.result = memptr;
2782 dopts.num_items = 1;
2783 dopts.item_size = size;
2784 dopts.alignment = alignment;
2785
2786 ret = imalloc(&sopts, &dopts);
2787 if (sopts.slow) {
2788 uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
2789 (uintptr_t)size};
2790 hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
2791 (uintptr_t)ret, args);
2792 }
2793
2794 LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2795 *memptr);
2796
2797 return ret;
2798}
2799
2800JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2801void JEMALLOC_NOTHROW *
2802JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2803je_aligned_alloc(size_t alignment, size_t size) {
2804 void *ret;
2805
2806 static_opts_t sopts;
2807 dynamic_opts_t dopts;
2808
2809 LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2810 alignment, size);
2811
2812 static_opts_init(&sopts);
2813 dynamic_opts_init(&dopts);
2814
2815 sopts.bump_empty_aligned_alloc = true;
2816 sopts.null_out_result_on_error = true;
2817 sopts.set_errno_on_error = true;
2818 sopts.min_alignment = 1;
2819 sopts.oom_string =
2820 "<jemalloc>: Error allocating aligned memory: out of memory\n";
2821 sopts.invalid_alignment_string =
2822 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2823
2824 dopts.result = &ret;
2825 dopts.num_items = 1;
2826 dopts.item_size = size;
2827 dopts.alignment = alignment;
2828
2829 imalloc(&sopts, &dopts);
2830 if (sopts.slow) {
2831 uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
2832 hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
2833 (uintptr_t)ret, args);
2834 }
2835
2836 LOG("core.aligned_alloc.exit", "result: %p", ret);
2837
2838 return ret;
2839}
2840
2841JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2842void JEMALLOC_NOTHROW *
2843JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2844je_calloc(size_t num, size_t size) {
2845 void *ret;
2846 static_opts_t sopts;
2847 dynamic_opts_t dopts;
2848
2849 LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2850
2851 static_opts_init(&sopts);
2852 dynamic_opts_init(&dopts);
2853
2854 sopts.may_overflow = true;
2855 sopts.null_out_result_on_error = true;
2856 sopts.set_errno_on_error = true;
2857 sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2858
2859 dopts.result = &ret;
2860 dopts.num_items = num;
2861 dopts.item_size = size;
2862 dopts.zero = true;
2863
2864 imalloc(&sopts, &dopts);
2865 if (sopts.slow) {
2866 uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
2867 hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
2868 }
2869
2870 LOG("core.calloc.exit", "result: %p", ret);
2871
2872 return ret;
2873}
2874
2875JEMALLOC_ALWAYS_INLINE void
2876ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2877 if (!slow_path) {
2878 tsd_assert_fast(tsd);
2879 }
2880 check_entry_exit_locking(tsd_tsdn(tsd));
2881 if (tsd_reentrancy_level_get(tsd) != 0) {
2882 assert(slow_path);
2883 }
2884
2885 assert(ptr != NULL);
2886 assert(malloc_initialized() || IS_INITIALIZER);
2887
2888 emap_alloc_ctx_t alloc_ctx;
2889 emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
2890 &alloc_ctx);
2891 assert(alloc_ctx.szind != SC_NSIZES);
2892
2893 size_t usize = sz_index2size(alloc_ctx.szind);
2894 if (config_prof && opt_prof) {
2895 prof_free(tsd, ptr, usize, &alloc_ctx);
2896 }
2897
2898 if (likely(!slow_path)) {
2899 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2900 false);
2901 } else {
2902 if (config_fill && slow_path && opt_junk_free) {
2903 junk_free_callback(ptr, usize);
2904 }
2905 idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2906 true);
2907 }
2908 thread_dalloc_event(tsd, usize);
2909}
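/*
 * isfree() further below is the sized variant of ifree(): the caller supplies
 * usize, so the emap lookup can usually be skipped and the size class derived
 * directly from the size (with extra care for sampled allocations).
 */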
2910
2911JEMALLOC_ALWAYS_INLINE bool
2912maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
2913 if (config_opt_size_checks) {
2914 emap_alloc_ctx_t dbg_ctx;
2915 emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
2916 &dbg_ctx);
2917 if (alloc_ctx->szind != dbg_ctx.szind) {
2918 safety_check_fail_sized_dealloc(
2919 /* current_dealloc */ true, ptr,
			    /* true_size */ sz_index2size(dbg_ctx.szind),
			    /* input_size */ sz_index2size(alloc_ctx->szind));
2922 return true;
2923 }
2924 if (alloc_ctx->slab != dbg_ctx.slab) {
2925 safety_check_fail(
2926 "Internal heap corruption detected: "
2927 "mismatch in slab bit");
2928 return true;
2929 }
2930 }
2931 return false;
2932}
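/*
 * config_opt_size_checks is a compile-time constant, so when size checks are
 * disabled the helper above folds away entirely and always reports success.
 */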
2933
2934JEMALLOC_ALWAYS_INLINE void
2935isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2936 if (!slow_path) {
2937 tsd_assert_fast(tsd);
2938 }
2939 check_entry_exit_locking(tsd_tsdn(tsd));
2940 if (tsd_reentrancy_level_get(tsd) != 0) {
2941 assert(slow_path);
2942 }
2943
2944 assert(ptr != NULL);
2945 assert(malloc_initialized() || IS_INITIALIZER);
2946
2947 emap_alloc_ctx_t alloc_ctx;
2948 if (!config_prof) {
2949 alloc_ctx.szind = sz_size2index(usize);
2950 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
2951 } else {
2952 if (likely(!prof_sample_aligned(ptr))) {
2953 /*
2954 * When the ptr is not page aligned, it was not sampled.
2955 * usize can be trusted to determine szind and slab.
2956 */
2957 alloc_ctx.szind = sz_size2index(usize);
2958 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
2959 } else if (opt_prof) {
2960 emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
2961 ptr, &alloc_ctx);
2962
2963 if (config_opt_safety_checks) {
2964 /* Small alloc may have !slab (sampled). */
2965 if (unlikely(alloc_ctx.szind !=
2966 sz_size2index(usize))) {
2967 safety_check_fail_sized_dealloc(
2968 /* current_dealloc */ true, ptr,
2969 /* true_size */ sz_index2size(
2970 alloc_ctx.szind),
2971 /* input_size */ usize);
2972 }
2973 }
2974 } else {
2975 alloc_ctx.szind = sz_size2index(usize);
2976 alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
2977 }
2978 }
2979 bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
2980 if (fail) {
2981 /*
2982 * This is a heap corruption bug. In real life we'll crash; for
2983 * the unit test we just want to avoid breaking anything too
2984 * badly to get a test result out. Let's leak instead of trying
2985 * to free.
2986 */
2987 return;
2988 }
2989
2990 if (config_prof && opt_prof) {
2991 prof_free(tsd, ptr, usize, &alloc_ctx);
2992 }
2993 if (likely(!slow_path)) {
2994 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
2995 false);
2996 } else {
2997 if (config_fill && slow_path && opt_junk_free) {
2998 junk_free_callback(ptr, usize);
2999 }
3000 isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
3001 true);
3002 }
3003 thread_dalloc_event(tsd, usize);
3004}
3005
3006JEMALLOC_NOINLINE
3007void
3008free_default(void *ptr) {
3009 UTRACE(ptr, 0, 0);
3010 if (likely(ptr != NULL)) {
3011 /*
3012 * We avoid setting up tsd fully (e.g. tcache, arena binding)
		 * based only on free() calls -- other activities trigger the
3014 * minimal to full transition. This is because free() may
3015 * happen during thread shutdown after tls deallocation: if a
3016 * thread never had any malloc activities until then, a
3017 * fully-setup tsd won't be destructed properly.
3018 */
3019 tsd_t *tsd = tsd_fetch_min();
3020 check_entry_exit_locking(tsd_tsdn(tsd));
3021
3022 if (likely(tsd_fast(tsd))) {
3023 tcache_t *tcache = tcache_get_from_ind(tsd,
3024 TCACHE_IND_AUTOMATIC, /* slow */ false,
3025 /* is_alloc */ false);
3026 ifree(tsd, ptr, tcache, /* slow */ false);
3027 } else {
3028 tcache_t *tcache = tcache_get_from_ind(tsd,
3029 TCACHE_IND_AUTOMATIC, /* slow */ true,
3030 /* is_alloc */ false);
3031 uintptr_t args_raw[3] = {(uintptr_t)ptr};
3032 hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
3033 ifree(tsd, ptr, tcache, /* slow */ true);
3034 }
3035
3036 check_entry_exit_locking(tsd_tsdn(tsd));
3037 }
3038}
3039
3040JEMALLOC_ALWAYS_INLINE bool
3041free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
3042 /*
	 * free_fastpath does not handle two uncommon cases: 1) sampled
	 * profiled objects and 2) sampled junk & stash for use-after-free
	 * detection.  Both have special alignments which are used to escape
	 * the fastpath.
	 *
	 * prof_sample is page-aligned, which also covers the UAF check when
	 * both are enabled (see the assertion below).  We avoid redundant
	 * checks since this is on the fastpath -- at most one runtime branch
	 * results from this.
3050 */
3051 if (config_debug && cache_bin_nonfast_aligned(ptr)) {
3052 assert(prof_sample_aligned(ptr));
3053 }
3054
3055 if (config_prof && check_prof) {
3056 /* When prof is enabled, the prof_sample alignment is enough. */
3057 if (prof_sample_aligned(ptr)) {
3058 return true;
3059 } else {
3060 return false;
3061 }
3062 }
3063
3064 if (config_uaf_detection) {
3065 if (cache_bin_nonfast_aligned(ptr)) {
3066 return true;
3067 } else {
3068 return false;
3069 }
3070 }
3071
3072 return false;
3073}
3074
3075/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE bool
free_fastpath(void *ptr, size_t size, bool size_hint) {
3078 tsd_t *tsd = tsd_get(false);
3079 /* The branch gets optimized away unless tsd_get_allocates(). */
3080 if (unlikely(tsd == NULL)) {
3081 return false;
3082 }
3083 /*
3084 * The tsd_fast() / initialized checks are folded into the branch
3085 * testing (deallocated_after >= threshold) later in this function.
3086 * The threshold will be set to 0 when !tsd_fast.
3087 */
3088 assert(tsd_fast(tsd) ||
3089 *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
3090
3091 emap_alloc_ctx_t alloc_ctx;
3092 if (!size_hint) {
3093 bool err = emap_alloc_ctx_try_lookup_fast(tsd,
3094 &arena_emap_global, ptr, &alloc_ctx);
3095
3096 /* Note: profiled objects will have alloc_ctx.slab set */
3097 if (unlikely(err || !alloc_ctx.slab ||
3098 free_fastpath_nonfast_aligned(ptr,
3099 /* check_prof */ false))) {
3100 return false;
3101 }
3102 assert(alloc_ctx.szind != SC_NSIZES);
3103 } else {
3104 /*
3105 * Check for both sizes that are too large, and for sampled /
3106 * special aligned objects. The alignment check will also check
3107 * for null ptr.
3108 */
3109 if (unlikely(size > SC_LOOKUP_MAXCLASS ||
3110 free_fastpath_nonfast_aligned(ptr,
3111 /* check_prof */ true))) {
3112 return false;
3113 }
3114 alloc_ctx.szind = sz_size2index_lookup(size);
3115 /* Max lookup class must be small. */
3116 assert(alloc_ctx.szind < SC_NBINS);
3117 /* This is a dead store, except when opt size checking is on. */
3118 alloc_ctx.slab = true;
3119 }
3120 /*
3121 * Currently the fastpath only handles small sizes. The branch on
3122 * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking
3123 * tcache szind upper limit (i.e. tcache_maxclass) as well.
3124 */
3125 assert(alloc_ctx.slab);
3126
3127 uint64_t deallocated, threshold;
3128 te_free_fastpath_ctx(tsd, &deallocated, &threshold);
3129
3130 size_t usize = sz_index2size(alloc_ctx.szind);
3131 uint64_t deallocated_after = deallocated + usize;
3132 /*
3133 * Check for events and tsd non-nominal (fast_threshold will be set to
3134 * 0) in a single branch. Note that this handles the uninitialized case
	 * as well (TSD init will be triggered on the non-fastpath).  Therefore
	 * anything that depends on a functional TSD (e.g. the alloc_ctx sanity
	 * check below) needs to come after this branch.
3138 */
3139 if (unlikely(deallocated_after >= threshold)) {
3140 return false;
3141 }
3142 assert(tsd_fast(tsd));
3143 bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
3144 if (fail) {
3145 /* See the comment in isfree. */
3146 return true;
3147 }
3148
3149 tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
3150 /* slow */ false, /* is_alloc */ false);
3151 cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
3152
3153 /*
3154 * If junking were enabled, this is where we would do it. It's not
3155 * though, since we ensured above that we're on the fast path. Assert
3156 * that to double-check.
3157 */
3158 assert(!opt_junk_free);
3159
3160 if (!cache_bin_dalloc_easy(bin, ptr)) {
3161 return false;
3162 }
3163
3164 *tsd_thread_deallocatedp_get(tsd) = deallocated_after;
3165
3166 return true;
3167}
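/*
 * To summarize the conditions above: the fast path succeeds only when tsd is
 * present and fast, the allocation is small (slab-backed, lookup-able size),
 * the pointer does not carry a special alignment (sampled or UAF-stashed),
 * the deallocation event threshold is not crossed, and the cache bin has
 * room.  Other cases return false so that je_free() falls back to
 * free_default() (except detected size-check failures, which deliberately
 * leak).
 */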
3168
3169JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3170je_free(void *ptr) {
3171 LOG("core.free.entry", "ptr: %p", ptr);
3172
3173 if (!free_fastpath(ptr, 0, false)) {
3174 free_default(ptr);
3175 }
3176
3177 LOG("core.free.exit", "");
3178}
3179
3180/*
3181 * End malloc(3)-compatible functions.
3182 */
3183/******************************************************************************/
3184/*
3185 * Begin non-standard override functions.
3186 */
3187
3188#ifdef JEMALLOC_OVERRIDE_MEMALIGN
3189JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3190void JEMALLOC_NOTHROW *
3191JEMALLOC_ATTR(malloc)
3192je_memalign(size_t alignment, size_t size) {
3193 void *ret;
3194 static_opts_t sopts;
3195 dynamic_opts_t dopts;
3196
3197 LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
3198 size);
3199
3200 static_opts_init(&sopts);
3201 dynamic_opts_init(&dopts);
3202
3203 sopts.min_alignment = 1;
3204 sopts.oom_string =
3205 "<jemalloc>: Error allocating aligned memory: out of memory\n";
3206 sopts.invalid_alignment_string =
3207 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
3208 sopts.null_out_result_on_error = true;
3209
3210 dopts.result = &ret;
3211 dopts.num_items = 1;
3212 dopts.item_size = size;
3213 dopts.alignment = alignment;
3214
3215 imalloc(&sopts, &dopts);
3216 if (sopts.slow) {
3217 uintptr_t args[3] = {alignment, size};
3218 hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
3219 args);
3220 }
3221
3222 LOG("core.memalign.exit", "result: %p", ret);
3223 return ret;
3224}
3225#endif
3226
3227#ifdef JEMALLOC_OVERRIDE_VALLOC
3228JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3229void JEMALLOC_NOTHROW *
3230JEMALLOC_ATTR(malloc)
3231je_valloc(size_t size) {
3232 void *ret;
3233
3234 static_opts_t sopts;
3235 dynamic_opts_t dopts;
3236
3237 LOG("core.valloc.entry", "size: %zu\n", size);
3238
3239 static_opts_init(&sopts);
3240 dynamic_opts_init(&dopts);
3241
3242 sopts.null_out_result_on_error = true;
3243 sopts.min_alignment = PAGE;
3244 sopts.oom_string =
3245 "<jemalloc>: Error allocating aligned memory: out of memory\n";
3246 sopts.invalid_alignment_string =
3247 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
3248
3249 dopts.result = &ret;
3250 dopts.num_items = 1;
3251 dopts.item_size = size;
3252 dopts.alignment = PAGE;
3253
3254 imalloc(&sopts, &dopts);
3255 if (sopts.slow) {
3256 uintptr_t args[3] = {size};
3257 hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
3258 }
3259
3260 LOG("core.valloc.exit", "result: %p\n", ret);
3261 return ret;
3262}
3263#endif
3264
3265#ifdef JEMALLOC_OVERRIDE_PVALLOC
3266JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3267void JEMALLOC_NOTHROW *
3268JEMALLOC_ATTR(malloc)
3269je_pvalloc(size_t size) {
3270 void *ret;
3271
3272 static_opts_t sopts;
3273 dynamic_opts_t dopts;
3274
3275 LOG("core.pvalloc.entry", "size: %zu\n", size);
3276
3277 static_opts_init(&sopts);
3278 dynamic_opts_init(&dopts);
3279
3280 sopts.null_out_result_on_error = true;
3281 sopts.min_alignment = PAGE;
3282 sopts.oom_string =
3283 "<jemalloc>: Error allocating aligned memory: out of memory\n";
3284 sopts.invalid_alignment_string =
3285 "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
3286
3287 dopts.result = &ret;
3288 dopts.num_items = 1;
3289 /*
	 * This is the only difference from je_valloc -- size is rounded up to
	 * a PAGE multiple.
3292 */
3293 dopts.item_size = PAGE_CEILING(size);
3294 dopts.alignment = PAGE;
3295
3296 imalloc(&sopts, &dopts);
3297 if (sopts.slow) {
3298 uintptr_t args[3] = {size};
3299 hook_invoke_alloc(hook_alloc_pvalloc, ret, (uintptr_t)ret,
3300 args);
3301 }
3302
3303 LOG("core.pvalloc.exit", "result: %p\n", ret);
3304 return ret;
3305}
3306#endif
3307
3308#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
3309/*
3310 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
3311 * to inconsistently reference libc's malloc(3)-compatible functions
3312 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
3313 *
3314 * These definitions interpose hooks in glibc. The functions are actually
3315 * passed an extra argument for the caller return address, which will be
3316 * ignored.
3317 */
3318#include <features.h> // defines __GLIBC__ if we are compiling against glibc
3319
3320JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
3321JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
3322JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
3323# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
3324JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
3325 je_memalign;
3326# endif
3327
3328# ifdef __GLIBC__
3329/*
3330 * To enable static linking with glibc, the libc specific malloc interface must
3331 * be implemented also, so none of glibc's malloc.o functions are added to the
3332 * link.
3333 */
3334# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
3335/* To force macro expansion of je_ prefix before stringification. */
3336# define PREALIAS(je_fn) ALIAS(je_fn)
3337# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
3338void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
3339# endif
3340# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
3341void __libc_free(void* ptr) PREALIAS(je_free);
3342# endif
3343# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
3344void *__libc_malloc(size_t size) PREALIAS(je_malloc);
3345# endif
3346# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
3347void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
3348# endif
3349# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
3350void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
3351# endif
3352# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
3353void *__libc_valloc(size_t size) PREALIAS(je_valloc);
3354# endif
3355# ifdef JEMALLOC_OVERRIDE___LIBC_PVALLOC
3356void *__libc_pvalloc(size_t size) PREALIAS(je_pvalloc);
3357# endif
3358# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
3359int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
3360# endif
3361# undef PREALIAS
3362# undef ALIAS
3363# endif
3364#endif
3365
3366/*
3367 * End non-standard override functions.
3368 */
3369/******************************************************************************/
3370/*
3371 * Begin non-standard functions.
3372 */
3373
3374JEMALLOC_ALWAYS_INLINE unsigned
3375mallocx_tcache_get(int flags) {
3376 if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) {
3377 return TCACHE_IND_AUTOMATIC;
3378 } else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
3379 return TCACHE_IND_NONE;
3380 } else {
3381 return MALLOCX_TCACHE_GET(flags);
3382 }
3383}
3384
3385JEMALLOC_ALWAYS_INLINE unsigned
3386mallocx_arena_get(int flags) {
3387 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
3388 return MALLOCX_ARENA_GET(flags);
3389 } else {
3390 return ARENA_IND_AUTOMATIC;
3391 }
3392}
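/*
 * For example (hypothetical caller): mallocx(4096, MALLOCX_ALIGN(64) |
 * MALLOCX_ZERO | MALLOCX_TCACHE_NONE) decodes to alignment 64, zero == true,
 * tcache_ind == TCACHE_IND_NONE and arena_ind == ARENA_IND_AUTOMATIC via the
 * helpers above.
 */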
3393
3394#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
3395
3396#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
3397#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \
3398 JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)
3399
3400typedef struct {
3401 void *ptr;
3402 size_t size;
3403} smallocx_return_t;
3404
3405JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3406smallocx_return_t JEMALLOC_NOTHROW
3407/*
3408 * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
3409 * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
3410 */
3411JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
3412 (size_t size, int flags) {
3413 /*
3414 * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
3415 * used here because it makes writing beyond the `size`
3416 * of the `ptr` undefined behavior, but the objective
3417 * of this function is to allow writing beyond `size`
3418 * up to `smallocx_return_t::size`.
3419 */
3420 smallocx_return_t ret;
3421 static_opts_t sopts;
3422 dynamic_opts_t dopts;
3423
3424 LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
3425
3426 static_opts_init(&sopts);
3427 dynamic_opts_init(&dopts);
3428
3429 sopts.assert_nonempty_alloc = true;
3430 sopts.null_out_result_on_error = true;
3431 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
3432 sopts.usize = true;
3433
3434 dopts.result = &ret.ptr;
3435 dopts.num_items = 1;
3436 dopts.item_size = size;
3437 if (unlikely(flags != 0)) {
3438 dopts.alignment = MALLOCX_ALIGN_GET(flags);
3439 dopts.zero = MALLOCX_ZERO_GET(flags);
3440 dopts.tcache_ind = mallocx_tcache_get(flags);
3441 dopts.arena_ind = mallocx_arena_get(flags);
3442 }
3443
3444 imalloc(&sopts, &dopts);
3445 assert(dopts.usize == je_nallocx(size, flags));
3446 ret.size = dopts.usize;
3447
3448 LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
3449 return ret;
3450}
3451#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
3452#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
3453#endif
3454
3455JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3456void JEMALLOC_NOTHROW *
3457JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
3458je_mallocx(size_t size, int flags) {
3459 void *ret;
3460 static_opts_t sopts;
3461 dynamic_opts_t dopts;
3462
3463 LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
3464
3465 static_opts_init(&sopts);
3466 dynamic_opts_init(&dopts);
3467
3468 sopts.assert_nonempty_alloc = true;
3469 sopts.null_out_result_on_error = true;
3470 sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
3471
3472 dopts.result = &ret;
3473 dopts.num_items = 1;
3474 dopts.item_size = size;
3475 if (unlikely(flags != 0)) {
3476 dopts.alignment = MALLOCX_ALIGN_GET(flags);
3477 dopts.zero = MALLOCX_ZERO_GET(flags);
3478 dopts.tcache_ind = mallocx_tcache_get(flags);
3479 dopts.arena_ind = mallocx_arena_get(flags);
3480 }
3481
3482 imalloc(&sopts, &dopts);
3483 if (sopts.slow) {
3484 uintptr_t args[3] = {size, flags};
3485 hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
3486 args);
3487 }
3488
3489 LOG("core.mallocx.exit", "result: %p", ret);
3490 return ret;
3491}
3492
3493static void *
3494irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
3495 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
3496 prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
3497 void *p;
3498
3499 if (tctx == NULL) {
3500 return NULL;
3501 }
3502
3503 alignment = prof_sample_align(alignment);
3504 if (usize <= SC_SMALL_MAXCLASS) {
3505 p = iralloct(tsdn, old_ptr, old_usize,
3506 SC_LARGE_MINCLASS, alignment, zero, tcache,
3507 arena, hook_args);
3508 if (p == NULL) {
3509 return NULL;
3510 }
3511 arena_prof_promote(tsdn, p, usize);
3512 } else {
3513 p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
3514 tcache, arena, hook_args);
3515 }
3516 assert(prof_sample_aligned(p));
3517
3518 return p;
3519}
3520
3521JEMALLOC_ALWAYS_INLINE void *
3522irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
3523 size_t alignment, size_t usize, bool zero, tcache_t *tcache,
3524 arena_t *arena, emap_alloc_ctx_t *alloc_ctx,
3525 hook_ralloc_args_t *hook_args) {
3526 prof_info_t old_prof_info;
3527 prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
3528 bool prof_active = prof_active_get_unlocked();
3529 bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
3530 prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
3531 void *p;
3532 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
3533 p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
3534 usize, alignment, zero, tcache, arena, tctx, hook_args);
3535 } else {
3536 p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
3537 zero, tcache, arena, hook_args);
3538 }
3539 if (unlikely(p == NULL)) {
3540 prof_alloc_rollback(tsd, tctx);
3541 return NULL;
3542 }
3543 assert(usize == isalloc(tsd_tsdn(tsd), p));
3544 prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr,
3545 old_usize, &old_prof_info, sample_event);
3546
3547 return p;
3548}
3549
3550static void *
3551do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
3552 void *p;
3553 tsd_t *tsd;
3554 size_t usize;
3555 size_t old_usize;
3556 size_t alignment = MALLOCX_ALIGN_GET(flags);
3557 arena_t *arena;
3558
3559 assert(ptr != NULL);
3560 assert(size != 0);
3561 assert(malloc_initialized() || IS_INITIALIZER);
3562 tsd = tsd_fetch();
3563 check_entry_exit_locking(tsd_tsdn(tsd));
3564
3565 bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
3566
3567 unsigned arena_ind = mallocx_arena_get(flags);
3568 if (arena_get_from_ind(tsd, arena_ind, &arena)) {
3569 goto label_oom;
3570 }
3571
3572 unsigned tcache_ind = mallocx_tcache_get(flags);
3573 tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
3574 /* slow */ true, /* is_alloc */ true);
3575
3576 emap_alloc_ctx_t alloc_ctx;
3577 emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
3578 &alloc_ctx);
3579 assert(alloc_ctx.szind != SC_NSIZES);
3580 old_usize = sz_index2size(alloc_ctx.szind);
3581 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
3582 if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
3583 goto label_oom;
3584 }
3585
3586 hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size,
3587 flags, 0}};
3588 if (config_prof && opt_prof) {
3589 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
3590 zero, tcache, arena, &alloc_ctx, &hook_args);
3591 if (unlikely(p == NULL)) {
3592 goto label_oom;
3593 }
3594 } else {
3595 p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
3596 zero, tcache, arena, &hook_args);
3597 if (unlikely(p == NULL)) {
3598 goto label_oom;
3599 }
3600 assert(usize == isalloc(tsd_tsdn(tsd), p));
3601 }
3602 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
3603 thread_alloc_event(tsd, usize);
3604 thread_dalloc_event(tsd, old_usize);
3605
3606 UTRACE(ptr, size, p);
3607 check_entry_exit_locking(tsd_tsdn(tsd));
3608
3609 if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
3610 && !zero) {
3611 size_t excess_len = usize - old_usize;
3612 void *excess_start = (void *)((uintptr_t)p + old_usize);
3613 junk_alloc_callback(excess_start, excess_len);
3614 }
3615
3616 return p;
3617label_oom:
3618 if (config_xmalloc && unlikely(opt_xmalloc)) {
3619 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
3620 abort();
3621 }
3622 UTRACE(ptr, size, 0);
3623 check_entry_exit_locking(tsd_tsdn(tsd));
3624
3625 return NULL;
3626}
3627
3628JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3629void JEMALLOC_NOTHROW *
3630JEMALLOC_ALLOC_SIZE(2)
3631je_rallocx(void *ptr, size_t size, int flags) {
3632 LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3633 size, flags);
3634 void *ret = do_rallocx(ptr, size, flags, false);
3635 LOG("core.rallocx.exit", "result: %p", ret);
3636 return ret;
3637}
3638
3639static void *
3640do_realloc_nonnull_zero(void *ptr) {
3641 if (config_stats) {
3642 atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
3643 }
3644 if (opt_zero_realloc_action == zero_realloc_action_alloc) {
3645 /*
3646 * The user might have gotten an alloc setting while expecting a
3647 * free setting. If that's the case, we at least try to
3648 * reduce the harm, and turn off the tcache while allocating, so
3649 * that we'll get a true first fit.
3650 */
3651 return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
3652 } else if (opt_zero_realloc_action == zero_realloc_action_free) {
3653 UTRACE(ptr, 0, 0);
3654 tsd_t *tsd = tsd_fetch();
3655 check_entry_exit_locking(tsd_tsdn(tsd));
3656
3657 tcache_t *tcache = tcache_get_from_ind(tsd,
3658 TCACHE_IND_AUTOMATIC, /* slow */ true,
3659 /* is_alloc */ false);
3660 uintptr_t args[3] = {(uintptr_t)ptr, 0};
3661 hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
3662 ifree(tsd, ptr, tcache, true);
3663
3664 check_entry_exit_locking(tsd_tsdn(tsd));
3665 return NULL;
3666 } else {
3667 safety_check_fail("Called realloc(non-null-ptr, 0) with "
3668 "zero_realloc:abort set\n");
		/*
		 * In real code, this will never run; the safety check failure
		 * will call abort.  In the unit test, we just want to bail
		 * out without corrupting internal state that the test needs
		 * to finish.
		 */
3674 return NULL;
3675 }
3676}
3677
3678JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3679void JEMALLOC_NOTHROW *
3680JEMALLOC_ALLOC_SIZE(2)
3681je_realloc(void *ptr, size_t size) {
3682 LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
3683
3684 if (likely(ptr != NULL && size != 0)) {
3685 void *ret = do_rallocx(ptr, size, 0, true);
3686 LOG("core.realloc.exit", "result: %p", ret);
3687 return ret;
3688 } else if (ptr != NULL && size == 0) {
3689 void *ret = do_realloc_nonnull_zero(ptr);
3690 LOG("core.realloc.exit", "result: %p", ret);
3691 return ret;
3692 } else {
3693 /* realloc(NULL, size) is equivalent to malloc(size). */
3694 void *ret;
3695
3696 static_opts_t sopts;
3697 dynamic_opts_t dopts;
3698
3699 static_opts_init(&sopts);
3700 dynamic_opts_init(&dopts);
3701
3702 sopts.null_out_result_on_error = true;
3703 sopts.set_errno_on_error = true;
3704 sopts.oom_string =
3705 "<jemalloc>: Error in realloc(): out of memory\n";
3706
3707 dopts.result = &ret;
3708 dopts.num_items = 1;
3709 dopts.item_size = size;
3710
3711 imalloc(&sopts, &dopts);
3712 if (sopts.slow) {
3713 uintptr_t args[3] = {(uintptr_t)ptr, size};
3714 hook_invoke_alloc(hook_alloc_realloc, ret,
3715 (uintptr_t)ret, args);
3716 }
3717 LOG("core.realloc.exit", "result: %p", ret);
3718 return ret;
3719 }
3720}
3721
3722JEMALLOC_ALWAYS_INLINE size_t
3723ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
3724 size_t extra, size_t alignment, bool zero) {
3725 size_t newsize;
3726
3727 if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
3728 &newsize)) {
3729 return old_usize;
3730 }
3731
3732 return newsize;
3733}
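/*
 * ixalloc() only ever resizes in place; when it cannot, the helper above
 * reports the unchanged old_usize, which is how callers (and je_xallocx())
 * detect "not resized".
 */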
3734
3735static size_t
3736ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
3737 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
3738 /* Sampled allocation needs to be page aligned. */
3739 if (tctx == NULL || !prof_sample_aligned(ptr)) {
3740 return old_usize;
3741 }
3742
3743 return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
3744 zero);
3745}
3746
3747JEMALLOC_ALWAYS_INLINE size_t
3748ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
3749 size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) {
3750 /*
3751 * old_prof_info is only used for asserting that the profiling info
3752 * isn't changed by the ixalloc() call.
3753 */
3754 prof_info_t old_prof_info;
3755 prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info);
3756
3757 /*
3758 * usize isn't knowable before ixalloc() returns when extra is non-zero.
3759 * Therefore, compute its maximum possible value and use that in
3760 * prof_alloc_prep() to decide whether to capture a backtrace.
3761 * prof_realloc() will use the actual usize to decide whether to sample.
3762 */
3763 size_t usize_max;
3764 if (aligned_usize_get(size + extra, alignment, &usize_max, NULL,
3765 false)) {
3766 /*
3767 * usize_max is out of range, and chances are that allocation
3768 * will fail, but use the maximum possible value and carry on
3769 * with prof_alloc_prep(), just in case allocation succeeds.
3770 */
3771 usize_max = SC_LARGE_MAXCLASS;
3772 }
3773 bool prof_active = prof_active_get_unlocked();
3774 bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max);
3775 prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
3776
3777 size_t usize;
3778 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
3779 usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
3780 size, extra, alignment, zero, tctx);
3781 } else {
3782 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
3783 extra, alignment, zero);
3784 }
3785
3786 /*
3787 * At this point we can still safely get the original profiling
3788 * information associated with the ptr, because (a) the edata_t object
3789 * associated with the ptr still lives and (b) the profiling info
3790 * fields are not touched. "(a)" is asserted in the outer je_xallocx()
3791 * function, and "(b)" is indirectly verified below by checking that
3792 * the alloc_tctx field is unchanged.
3793 */
3794 prof_info_t prof_info;
3795 if (usize == old_usize) {
3796 prof_info_get(tsd, ptr, alloc_ctx, &prof_info);
3797 prof_alloc_rollback(tsd, tctx);
3798 } else {
3799 prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
3800 assert(usize <= usize_max);
3801 sample_event = te_prof_sample_event_lookahead(tsd, usize);
3802 prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
3803 old_usize, &prof_info, sample_event);
3804 }
3805
3806 assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx);
3807 return usize;
3808}
3809
3810JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3811je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
3812 tsd_t *tsd;
3813 size_t usize, old_usize;
3814 size_t alignment = MALLOCX_ALIGN_GET(flags);
3815 bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
3816
3817 LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
3818 "flags: %d", ptr, size, extra, flags);
3819
3820 assert(ptr != NULL);
3821 assert(size != 0);
3822 assert(SIZE_T_MAX - size >= extra);
3823 assert(malloc_initialized() || IS_INITIALIZER);
3824 tsd = tsd_fetch();
3825 check_entry_exit_locking(tsd_tsdn(tsd));
3826
3827 /*
3828 * old_edata is only for verifying that xallocx() keeps the edata_t
3829 * object associated with the ptr (though the content of the edata_t
3830 * object can be changed).
3831 */
3832 edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
3833 &arena_emap_global, ptr);
3834
3835 emap_alloc_ctx_t alloc_ctx;
3836 emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
3837 &alloc_ctx);
3838 assert(alloc_ctx.szind != SC_NSIZES);
3839 old_usize = sz_index2size(alloc_ctx.szind);
3840 assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
3841 /*
3842 * The API explicitly absolves itself of protecting against (size +
3843 * extra) numerical overflow, but we may need to clamp extra to avoid
3844 * exceeding SC_LARGE_MAXCLASS.
3845 *
3846 * Ordinarily, size limit checking is handled deeper down, but here we
3847 * have to check as part of (size + extra) clamping, since we need the
3848 * clamped value in the above helper functions.
3849 */
3850 if (unlikely(size > SC_LARGE_MAXCLASS)) {
3851 usize = old_usize;
3852 goto label_not_resized;
3853 }
3854 if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
3855 extra = SC_LARGE_MAXCLASS - size;
3856 }
3857
3858 if (config_prof && opt_prof) {
3859 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
3860 alignment, zero, &alloc_ctx);
3861 } else {
3862 usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
3863 extra, alignment, zero);
3864 }
3865
3866 /*
3867 * xallocx() should keep using the same edata_t object (though its
3868 * content can be changed).
3869 */
3870 assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
3871 == old_edata);
3872
3873 if (unlikely(usize == old_usize)) {
3874 goto label_not_resized;
3875 }
3876 thread_alloc_event(tsd, usize);
3877 thread_dalloc_event(tsd, old_usize);
3878
3879 if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
3880 !zero) {
3881 size_t excess_len = usize - old_usize;
3882 void *excess_start = (void *)((uintptr_t)ptr + old_usize);
3883 junk_alloc_callback(excess_start, excess_len);
3884 }
3885label_not_resized:
3886 if (unlikely(!tsd_fast(tsd))) {
3887 uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
3888 hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
3889 usize, (uintptr_t)usize, args);
3890 }
3891
3892 UTRACE(ptr, size, ptr);
3893 check_entry_exit_locking(tsd_tsdn(tsd));
3894
3895 LOG("core.xallocx.exit", "result: %zu", usize);
3896 return usize;
3897}
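/*
 * Editorial sketch (not part of jemalloc): one way an application might use
 * the public xallocx()/rallocx() entry points backed by the definitions
 * above.  grow_buffer() and its fallback policy are hypothetical, and the
 * block is deliberately kept out of the build.
 */
#if 0
#include <jemalloc/jemalloc.h>

static void *
grow_buffer(void *ptr, size_t new_size) {
	/*
	 * xallocx() only ever resizes in place and returns the resulting
	 * usable size; a result smaller than new_size means the extent could
	 * not be grown, so fall back to rallocx(), which may move the data.
	 * If rallocx() returns NULL, the original allocation is still valid.
	 */
	if (xallocx(ptr, new_size, 0, 0) >= new_size) {
		return ptr;
	}
	return rallocx(ptr, new_size, 0);
}
#endif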
3898
3899JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3900JEMALLOC_ATTR(pure)
3901je_sallocx(const void *ptr, int flags) {
3902 size_t usize;
3903 tsdn_t *tsdn;
3904
3905 LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
3906
3907 assert(malloc_initialized() || IS_INITIALIZER);
3908 assert(ptr != NULL);
3909
3910 tsdn = tsdn_fetch();
3911 check_entry_exit_locking(tsdn);
3912
3913 if (config_debug || force_ivsalloc) {
3914 usize = ivsalloc(tsdn, ptr);
3915 assert(force_ivsalloc || usize != 0);
3916 } else {
3917 usize = isalloc(tsdn, ptr);
3918 }
3919
3920 check_entry_exit_locking(tsdn);
3921
3922 LOG("core.sallocx.exit", "result: %zu", usize);
3923 return usize;
3924}
3925
3926JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3927je_dallocx(void *ptr, int flags) {
3928 LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
3929
3930 assert(ptr != NULL);
3931 assert(malloc_initialized() || IS_INITIALIZER);
3932
3933 tsd_t *tsd = tsd_fetch_min();
3934 bool fast = tsd_fast(tsd);
3935 check_entry_exit_locking(tsd_tsdn(tsd));
3936
3937 unsigned tcache_ind = mallocx_tcache_get(flags);
3938 tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
3939 /* is_alloc */ false);
3940
3941 UTRACE(ptr, 0, 0);
3942 if (likely(fast)) {
3943 tsd_assert_fast(tsd);
3944 ifree(tsd, ptr, tcache, false);
3945 } else {
3946 uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
3947 hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
3948 ifree(tsd, ptr, tcache, true);
3949 }
3950 check_entry_exit_locking(tsd_tsdn(tsd));
3951
3952 LOG("core.dallocx.exit", "");
3953}
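/*
 * Editorial sketch (not part of jemalloc): freeing through the public
 * dallocx() entry point while bypassing the thread cache.  The
 * MALLOCX_TCACHE_NONE flag is decoded by mallocx_tcache_get() above, so
 * tcache_get_from_ind() yields no tcache and ifree() frees directly.
 * free_uncached() is a hypothetical wrapper; the block is not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>

static void
free_uncached(void *ptr) {
	if (ptr != NULL) {	/* dallocx() requires a non-NULL pointer. */
		dallocx(ptr, MALLOCX_TCACHE_NONE);
	}
}
#endif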
3954
3955JEMALLOC_ALWAYS_INLINE size_t
3956inallocx(tsdn_t *tsdn, size_t size, int flags) {
3957 check_entry_exit_locking(tsdn);
3958 size_t usize;
3959 	/* If the result is out of range, let the user see it rather than failing. */
3960 aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false);
3961 check_entry_exit_locking(tsdn);
3962 return usize;
3963}
3964
3965JEMALLOC_NOINLINE void
3966sdallocx_default(void *ptr, size_t size, int flags) {
3967 assert(ptr != NULL);
3968 assert(malloc_initialized() || IS_INITIALIZER);
3969
3970 tsd_t *tsd = tsd_fetch_min();
3971 bool fast = tsd_fast(tsd);
3972 size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
3973 check_entry_exit_locking(tsd_tsdn(tsd));
3974
3975 unsigned tcache_ind = mallocx_tcache_get(flags);
3976 tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
3977 /* is_alloc */ false);
3978
3979 UTRACE(ptr, 0, 0);
3980 if (likely(fast)) {
3981 tsd_assert_fast(tsd);
3982 isfree(tsd, ptr, usize, tcache, false);
3983 } else {
3984 uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
3985 hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
3986 isfree(tsd, ptr, usize, tcache, true);
3987 }
3988 check_entry_exit_locking(tsd_tsdn(tsd));
3989}
3990
3991JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3992je_sdallocx(void *ptr, size_t size, int flags) {
3993 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3994 size, flags);
3995
3996 if (flags != 0 || !free_fastpath(ptr, size, true)) {
3997 sdallocx_default(ptr, size, flags);
3998 }
3999
4000 LOG("core.sdallocx.exit", "");
4001}
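/*
 * Editorial sketch (not part of jemalloc): pairing mallocx() with the public
 * sdallocx() entry point above.  Passing a size between the originally
 * requested size and the usable size, with flags == 0, keeps the call
 * eligible for free_fastpath(); non-zero flags route through
 * sdallocx_default().  The example function is hypothetical and the block is
 * not compiled.
 */
#if 0
#include <jemalloc/jemalloc.h>

static void
example_sized_dealloc(void) {
	void *p = mallocx(4096, 0);
	if (p != NULL) {
		/* Same size as requested, flags == 0: fast-path eligible. */
		sdallocx(p, 4096, 0);
	}
}
#endif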
4002
4003void JEMALLOC_NOTHROW
4004je_sdallocx_noflags(void *ptr, size_t size) {
4005 LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
4006 size);
4007
4008 if (!free_fastpath(ptr, size, true)) {
4009 sdallocx_default(ptr, size, 0);
4010 }
4011
4012 LOG("core.sdallocx.exit", "");
4013}
4014
4015JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
4016JEMALLOC_ATTR(pure)
4017je_nallocx(size_t size, int flags) {
4018 size_t usize;
4019 tsdn_t *tsdn;
4020
4021 assert(size != 0);
4022
4023 if (unlikely(malloc_init())) {
4024 LOG("core.nallocx.exit", "result: %zu", ZU(0));
4025 return 0;
4026 }
4027
4028 tsdn = tsdn_fetch();
4029 check_entry_exit_locking(tsdn);
4030
4031 usize = inallocx(tsdn, size, flags);
4032 if (unlikely(usize > SC_LARGE_MAXCLASS)) {
4033 LOG("core.nallocx.exit", "result: %zu", ZU(0));
4034 return 0;
4035 }
4036
4037 check_entry_exit_locking(tsdn);
4038 LOG("core.nallocx.exit", "result: %zu", usize);
4039 return usize;
4040}
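/*
 * Editorial sketch (not part of jemalloc): using the public nallocx() entry
 * point to learn, without allocating, the usable size a request would be
 * rounded up to.  For a successful mallocx() with the same arguments,
 * sallocx() reports the same value; nallocx() returns 0 when the request
 * would overflow.  example_nallocx() is hypothetical; the block is not
 * compiled.
 */
#if 0
#include <assert.h>
#include <jemalloc/jemalloc.h>

static void
example_nallocx(void) {
	size_t usize = nallocx(100, 0);
	void *p = mallocx(100, 0);
	if (p != NULL) {
		assert(sallocx(p, 0) == usize);
		dallocx(p, 0);
	}
}
#endif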
4041
4042JEMALLOC_EXPORT int JEMALLOC_NOTHROW
4043je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
4044 size_t newlen) {
4045 int ret;
4046 tsd_t *tsd;
4047
4048 LOG("core.mallctl.entry", "name: %s", name);
4049
4050 if (unlikely(malloc_init())) {
4051 LOG("core.mallctl.exit", "result: %d", EAGAIN);
4052 return EAGAIN;
4053 }
4054
4055 tsd = tsd_fetch();
4056 check_entry_exit_locking(tsd_tsdn(tsd));
4057 ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
4058 check_entry_exit_locking(tsd_tsdn(tsd));
4059
4060 LOG("core.mallctl.exit", "result: %d", ret);
4061 return ret;
4062}
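/*
 * Editorial sketch (not part of jemalloc): typical reads and writes through
 * the public mallctl() entry point above.  "arenas.narenas" and
 * "background_thread" are existing ctl names; example_mallctl() itself is
 * hypothetical and the block is not compiled.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_mallctl(void) {
	/* Read a scalar value. */
	unsigned narenas;
	size_t sz = sizeof(narenas);
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
		printf("number of arenas: %u\n", narenas);
	}
	/* Write a value (enable background threads). */
	bool enable = true;
	(void)mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
}
#endif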
4063
4064JEMALLOC_EXPORT int JEMALLOC_NOTHROW
4065je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
4066 int ret;
4067
4068 LOG("core.mallctlnametomib.entry", "name: %s", name);
4069
4070 if (unlikely(malloc_init())) {
4071 LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
4072 return EAGAIN;
4073 }
4074
4075 tsd_t *tsd = tsd_fetch();
4076 check_entry_exit_locking(tsd_tsdn(tsd));
4077 ret = ctl_nametomib(tsd, name, mibp, miblenp);
4078 check_entry_exit_locking(tsd_tsdn(tsd));
4079
4080 LOG("core.mallctlnametomib.exit", "result: %d", ret);
4081 return ret;
4082}
4083
4084JEMALLOC_EXPORT int JEMALLOC_NOTHROW
4085je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
4086 void *newp, size_t newlen) {
4087 int ret;
4088 tsd_t *tsd;
4089
4090 LOG("core.mallctlbymib.entry", "");
4091
4092 if (unlikely(malloc_init())) {
4093 LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
4094 return EAGAIN;
4095 }
4096
4097 tsd = tsd_fetch();
4098 check_entry_exit_locking(tsd_tsdn(tsd));
4099 ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
4100 check_entry_exit_locking(tsd_tsdn(tsd));
4101 LOG("core.mallctlbymib.exit", "result: %d", ret);
4102 return ret;
4103}
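/*
 * Editorial sketch (not part of jemalloc): caching a MIB with
 * mallctlnametomib() and then querying it repeatedly with mallctlbymib(),
 * which avoids the per-call name parsing done by ctl_byname() above.
 * "epoch" and "stats.allocated" are existing ctl names; the function below
 * is hypothetical and the block is not compiled.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
report_allocated_repeatedly(void) {
	size_t mib[2];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.allocated", mib, &miblen) != 0) {
		return;
	}
	for (int i = 0; i < 10; i++) {
		/* Refresh the stats snapshot before reading it. */
		uint64_t epoch = 1;
		(void)mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

		size_t allocated;
		size_t sz = sizeof(allocated);
		if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0) {
			printf("stats.allocated: %zu\n", allocated);
		}
	}
}
#endif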
4104
4105#define STATS_PRINT_BUFSIZE 65536
4106JEMALLOC_EXPORT void JEMALLOC_NOTHROW
4107je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
4108 const char *opts) {
4109 tsdn_t *tsdn;
4110
4111 LOG("core.malloc_stats_print.entry", "");
4112
4113 tsdn = tsdn_fetch();
4114 check_entry_exit_locking(tsdn);
4115
4116 if (config_debug) {
4117 stats_print(write_cb, cbopaque, opts);
4118 } else {
4119 buf_writer_t buf_writer;
4120 buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL,
4121 STATS_PRINT_BUFSIZE);
4122 stats_print(buf_writer_cb, &buf_writer, opts);
4123 buf_writer_terminate(tsdn, &buf_writer);
4124 }
4125
4126 check_entry_exit_locking(tsdn);
4127 LOG("core.malloc_stats_print.exit", "");
4128}
4129#undef STATS_PRINT_BUFSIZE
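/*
 * Editorial sketch (not part of jemalloc): supplying a custom write callback
 * to the public malloc_stats_print() entry point above.  The callback
 * signature matches the write_cb parameter; "J" is the existing option for
 * JSON output.  The helper names are hypothetical and the block is not
 * compiled.
 */
#if 0
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
stats_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

static void
dump_stats_json(FILE *f) {
	/* Passing NULL for write_cb uses the default output (stderr). */
	malloc_stats_print(stats_write_cb, f, "J");
}
#endif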
4130
4131JEMALLOC_ALWAYS_INLINE size_t
4132je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
4133 assert(malloc_initialized() || IS_INITIALIZER);
4134
4135 tsdn_t *tsdn = tsdn_fetch();
4136 check_entry_exit_locking(tsdn);
4137
4138 size_t ret;
4139 if (unlikely(ptr == NULL)) {
4140 ret = 0;
4141 } else {
4142 if (config_debug || force_ivsalloc) {
4143 ret = ivsalloc(tsdn, ptr);
4144 assert(force_ivsalloc || ret != 0);
4145 } else {
4146 ret = isalloc(tsdn, ptr);
4147 }
4148 }
4149 check_entry_exit_locking(tsdn);
4150
4151 return ret;
4152}
4153
4154JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
4155je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
4156 LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
4157
4158 size_t ret = je_malloc_usable_size_impl(ptr);
4159
4160 LOG("core.malloc_usable_size.exit", "result: %zu", ret);
4161 return ret;
4162}
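/*
 * Editorial sketch (not part of jemalloc): the value reported by the public
 * malloc_usable_size() entry point above may exceed the requested size, and
 * the whole reported range is writable; portable code should not depend on
 * any particular amount of over-allocation.  The function below is
 * hypothetical and the block is not compiled.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

static void
example_usable_size(void) {
	char *p = malloc(100);
	if (p != NULL) {
		memset(p, 0, malloc_usable_size(p));	/* At least 100 bytes. */
		free(p);
	}
}
#endif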
4163
4164#ifdef JEMALLOC_HAVE_MALLOC_SIZE
4165JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
4166je_malloc_size(const void *ptr) {
4167 LOG("core.malloc_size.entry", "ptr: %p", ptr);
4168
4169 size_t ret = je_malloc_usable_size_impl(ptr);
4170
4171 LOG("core.malloc_size.exit", "result: %zu", ret);
4172 return ret;
4173}
4174#endif
4175
4176static void
4177batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) {
4178 assert(config_prof && opt_prof);
4179 bool prof_sample_event = te_prof_sample_event_lookahead(tsd,
4180 batch * usize);
4181 assert(!prof_sample_event);
4182 size_t surplus;
4183 prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd,
4184 (batch + 1) * usize, &surplus);
4185 assert(prof_sample_event);
4186 assert(surplus < usize);
4187}
4188
4189size_t
4190batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
4191 LOG("core.batch_alloc.entry",
4192 "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags);
4193
4194 tsd_t *tsd = tsd_fetch();
4195 check_entry_exit_locking(tsd_tsdn(tsd));
4196
4197 size_t filled = 0;
4198
4199 if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) {
4200 goto label_done;
4201 }
4202
4203 size_t alignment = MALLOCX_ALIGN_GET(flags);
4204 size_t usize;
4205 if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
4206 goto label_done;
4207 }
4208 szind_t ind = sz_size2index(usize);
4209 bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
4210
4211 /*
4212 * The cache bin and arena will be lazily initialized; it's hard to
4213 * know in advance whether each of them needs to be initialized.
4214 */
4215 cache_bin_t *bin = NULL;
4216 arena_t *arena = NULL;
4217
4218 size_t nregs = 0;
4219 if (likely(ind < SC_NBINS)) {
4220 nregs = bin_infos[ind].nregs;
4221 assert(nregs > 0);
4222 }
4223
4224 while (filled < num) {
4225 size_t batch = num - filled;
4226 size_t surplus = SIZE_MAX; /* Dead store. */
4227 bool prof_sample_event = config_prof && opt_prof
4228 && prof_active_get_unlocked()
4229 && te_prof_sample_event_lookahead_surplus(tsd,
4230 batch * usize, &surplus);
4231
4232 if (prof_sample_event) {
4233 /*
4234 * Adjust so that the batch does not trigger prof
4235 * sampling.
4236 */
4237 batch -= surplus / usize + 1;
4238 batch_alloc_prof_sample_assert(tsd, batch, usize);
4239 }
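		/*
		 * Worked example (editorial note) for the adjustment above:
		 * with usize = 4096 and batch = 100, suppose the lookahead
		 * reports surplus = 10000, i.e. 100 * 4096 bytes would
		 * overshoot the next sample threshold by 10000 bytes.  Then
		 * batch is reduced by 10000 / 4096 + 1 = 3 to 97, and
		 * 97 * 4096 = 397312 stays below the threshold, so the batch
		 * itself never triggers a sample; the sampled allocation, if
		 * any, comes from the je_mallocx() call at the bottom of the
		 * loop.
		 */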
4240
4241 size_t progress = 0;
4242
4243 if (likely(ind < SC_NBINS) && batch >= nregs) {
4244 if (arena == NULL) {
4245 unsigned arena_ind = mallocx_arena_get(flags);
4246 if (arena_get_from_ind(tsd, arena_ind,
4247 &arena)) {
4248 goto label_done;
4249 }
4250 if (arena == NULL) {
4251 arena = arena_choose(tsd, NULL);
4252 }
4253 if (unlikely(arena == NULL)) {
4254 goto label_done;
4255 }
4256 }
4257 size_t arena_batch = batch - batch % nregs;
4258 size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena,
4259 ind, ptrs + filled, arena_batch, zero);
4260 progress += n;
4261 filled += n;
4262 }
4263
4264 if (likely(ind < nhbins) && progress < batch) {
4265 if (bin == NULL) {
4266 unsigned tcache_ind = mallocx_tcache_get(flags);
4267 tcache_t *tcache = tcache_get_from_ind(tsd,
4268 tcache_ind, /* slow */ true,
4269 /* is_alloc */ true);
4270 if (tcache != NULL) {
4271 bin = &tcache->bins[ind];
4272 }
4273 }
4274 /*
4275 * If we don't have a tcache bin, we don't want to
4276 * immediately give up, because there's the possibility
4277 * that the user explicitly requested to bypass the
4278 * tcache, or that the user explicitly turned off the
4279 * tcache; in such cases, we go through the slow path,
4280 * i.e. the mallocx() call at the end of the while loop.
4281 */
4282 if (bin != NULL) {
4283 size_t bin_batch = batch - progress;
4284 /*
4285 * n can be less than bin_batch, meaning that
4286 * the cache bin does not have enough memory.
4287 * In such cases, we rely on the slow path,
4288 * i.e. the mallocx() call at the end of the
4289 * while loop, to fill in the cache, and in the
4290 * next iteration of the while loop, the tcache
4291 				 * will contain a lot of memory, which we can
4292 				 * harvest here.  Compared to the
4293 * alternative approach where we directly go to
4294 * the arena bins here, the overhead of our
4295 * current approach should usually be minimal,
4296 * since we never try to fetch more memory than
4297 * what a slab contains via the tcache. An
4298 * additional benefit is that the tcache will
4299 * not be empty for the next allocation request.
4300 */
4301 size_t n = cache_bin_alloc_batch(bin, bin_batch,
4302 ptrs + filled);
4303 if (config_stats) {
4304 bin->tstats.nrequests += n;
4305 }
4306 if (zero) {
4307 for (size_t i = 0; i < n; ++i) {
4308 memset(ptrs[filled + i], 0,
4309 usize);
4310 }
4311 }
4312 if (config_prof && opt_prof
4313 && unlikely(ind >= SC_NBINS)) {
4314 for (size_t i = 0; i < n; ++i) {
4315 prof_tctx_reset_sampled(tsd,
4316 ptrs[filled + i]);
4317 }
4318 }
4319 progress += n;
4320 filled += n;
4321 }
4322 }
4323
4324 /*
4325 * For thread events other than prof sampling, trigger them as
4326 * if there's a single allocation of size (n * usize). This is
4327 * fine because:
4328 * (a) these events do not alter the allocation itself, and
4329 * (b) it's possible that some event would have been triggered
4330 * multiple times, instead of only once, if the allocations
4331 * were handled individually, but it would do no harm (or
4332 * even be beneficial) to coalesce the triggerings.
4333 */
4334 thread_alloc_event(tsd, progress * usize);
4335
4336 if (progress < batch || prof_sample_event) {
4337 void *p = je_mallocx(size, flags);
4338 if (p == NULL) { /* OOM */
4339 break;
4340 }
4341 if (progress == batch) {
4342 assert(prof_sampled(tsd, p));
4343 }
4344 ptrs[filled++] = p;
4345 }
4346 }
4347
4348label_done:
4349 check_entry_exit_locking(tsd_tsdn(tsd));
4350 LOG("core.batch_alloc.exit", "result: %zu", filled);
4351 return filled;
4352}
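/*
 * Editorial sketch (not part of jemalloc): calling batch_alloc() as defined
 * above, e.g. from an in-tree test; how the function is surfaced to
 * applications is outside the scope of this file.  The return value is the
 * number of slots actually filled, which can be smaller than the request on
 * OOM, and only that prefix of the array is valid.  The caller below is
 * hypothetical and the block is not compiled.
 */
#if 0
static void
example_batch_alloc(void) {
	enum { N = 64 };
	void *ptrs[N];
	size_t filled = batch_alloc(ptrs, N, 128, 0);
	for (size_t i = 0; i < filled; i++) {
		/* ... use ptrs[i] ... */
		dallocx(ptrs[i], 0);
	}
}
#endif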
4353
4354/*
4355 * End non-standard functions.
4356 */
4357/******************************************************************************/
4358/*
4359 * The following functions are used by threading libraries for protection of
4360 * malloc during fork().
4361 */
4362
4363/*
4364 * If an application creates a thread before doing any allocation in the main
4365 * thread, then calls fork(2) in the main thread followed by memory allocation
4366 * in the child process, a race can occur that results in deadlock within the
4367 * child: the main thread may have forked while the created thread had
4368 * partially initialized the allocator. Ordinarily jemalloc prevents
4369 * fork/malloc races via the following functions it registers during
4370 * initialization using pthread_atfork(), but of course that does no good if
4371 * the allocator isn't fully initialized at fork time. The following library
4372 * constructor is a partial solution to this problem. It may still be possible
4373 * to trigger the deadlock described above, but doing so would involve forking
4374 * via a library constructor that runs before jemalloc's runs.
4375 */
4376#ifndef JEMALLOC_JET
4377JEMALLOC_ATTR(constructor)
4378static void
4379jemalloc_constructor(void) {
4380 malloc_init();
4381}
4382#endif
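/*
 * Editorial sketch (not part of jemalloc's build): the fork protection
 * described above depends on the prefork/postfork handlers defined below
 * being registered exactly once during initialization, conceptually along
 * these lines (the real registration lives in the init path and is
 * platform-dependent).
 */
#if 0
#include <pthread.h>

static void
register_fork_handlers(void) {
	(void)pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}
#endif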
4383
4384#ifndef JEMALLOC_MUTEX_INIT_CB
4385void
4386jemalloc_prefork(void)
4387#else
4388JEMALLOC_EXPORT void
4389_malloc_prefork(void)
4390#endif
4391{
4392 tsd_t *tsd;
4393 unsigned i, j, narenas;
4394 arena_t *arena;
4395
4396#ifdef JEMALLOC_MUTEX_INIT_CB
4397 if (!malloc_initialized()) {
4398 return;
4399 }
4400#endif
4401 assert(malloc_initialized());
4402
4403 tsd = tsd_fetch();
4404
4405 narenas = narenas_total_get();
4406
4407 witness_prefork(tsd_witness_tsdp_get(tsd));
4408 /* Acquire all mutexes in a safe order. */
4409 ctl_prefork(tsd_tsdn(tsd));
4410 tcache_prefork(tsd_tsdn(tsd));
4411 malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
4412 if (have_background_thread) {
4413 background_thread_prefork0(tsd_tsdn(tsd));
4414 }
4415 prof_prefork0(tsd_tsdn(tsd));
4416 if (have_background_thread) {
4417 background_thread_prefork1(tsd_tsdn(tsd));
4418 }
4419 /* Break arena prefork into stages to preserve lock order. */
4420 for (i = 0; i < 9; i++) {
4421 for (j = 0; j < narenas; j++) {
4422 if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
4423 NULL) {
4424 switch (i) {
4425 case 0:
4426 arena_prefork0(tsd_tsdn(tsd), arena);
4427 break;
4428 case 1:
4429 arena_prefork1(tsd_tsdn(tsd), arena);
4430 break;
4431 case 2:
4432 arena_prefork2(tsd_tsdn(tsd), arena);
4433 break;
4434 case 3:
4435 arena_prefork3(tsd_tsdn(tsd), arena);
4436 break;
4437 case 4:
4438 arena_prefork4(tsd_tsdn(tsd), arena);
4439 break;
4440 case 5:
4441 arena_prefork5(tsd_tsdn(tsd), arena);
4442 break;
4443 case 6:
4444 arena_prefork6(tsd_tsdn(tsd), arena);
4445 break;
4446 case 7:
4447 arena_prefork7(tsd_tsdn(tsd), arena);
4448 break;
4449 case 8:
4450 arena_prefork8(tsd_tsdn(tsd), arena);
4451 break;
4452 default: not_reached();
4453 }
4454 }
4455 }
4456
4457 }
4458 prof_prefork1(tsd_tsdn(tsd));
4459 stats_prefork(tsd_tsdn(tsd));
4460 tsd_prefork(tsd);
4461}
4462
4463#ifndef JEMALLOC_MUTEX_INIT_CB
4464void
4465jemalloc_postfork_parent(void)
4466#else
4467JEMALLOC_EXPORT void
4468_malloc_postfork(void)
4469#endif
4470{
4471 tsd_t *tsd;
4472 unsigned i, narenas;
4473
4474#ifdef JEMALLOC_MUTEX_INIT_CB
4475 if (!malloc_initialized()) {
4476 return;
4477 }
4478#endif
4479 assert(malloc_initialized());
4480
4481 tsd = tsd_fetch();
4482
4483 tsd_postfork_parent(tsd);
4484
4485 witness_postfork_parent(tsd_witness_tsdp_get(tsd));
4486 /* Release all mutexes, now that fork() has completed. */
4487 stats_postfork_parent(tsd_tsdn(tsd));
4488 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
4489 arena_t *arena;
4490
4491 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
4492 arena_postfork_parent(tsd_tsdn(tsd), arena);
4493 }
4494 }
4495 prof_postfork_parent(tsd_tsdn(tsd));
4496 if (have_background_thread) {
4497 background_thread_postfork_parent(tsd_tsdn(tsd));
4498 }
4499 malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
4500 tcache_postfork_parent(tsd_tsdn(tsd));
4501 ctl_postfork_parent(tsd_tsdn(tsd));
4502}
4503
4504void
4505jemalloc_postfork_child(void) {
4506 tsd_t *tsd;
4507 unsigned i, narenas;
4508
4509 assert(malloc_initialized());
4510
4511 tsd = tsd_fetch();
4512
4513 tsd_postfork_child(tsd);
4514
4515 witness_postfork_child(tsd_witness_tsdp_get(tsd));
4516 /* Release all mutexes, now that fork() has completed. */
4517 stats_postfork_child(tsd_tsdn(tsd));
4518 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
4519 arena_t *arena;
4520
4521 if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
4522 arena_postfork_child(tsd_tsdn(tsd), arena);
4523 }
4524 }
4525 prof_postfork_child(tsd_tsdn(tsd));
4526 if (have_background_thread) {
4527 background_thread_postfork_child(tsd_tsdn(tsd));
4528 }
4529 malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
4530 tcache_postfork_child(tsd_tsdn(tsd));
4531 ctl_postfork_child(tsd_tsdn(tsd));
4532}
4533
4534/******************************************************************************/
4535