#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"

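/*
 * Return the id of the CPU the calling thread is currently running on, for
 * per-CPU arena selection.  Only the Windows and sched_getcpu() backends are
 * implemented; other configurations must not enable percpu_arena.
 */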
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(_WIN32)
	return GetCurrentProcessorNumber();
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
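/*
 * In per_phycpu_arena mode this relies on the common enumeration in which
 * logical CPUs k and k + ncpus / 2 are hyperthread siblings, so both map to
 * arena k.
 */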
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) ||
	    ((unsigned)cpuid < ncpus / 2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
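/*
 * For example, per_phycpu_arena with ncpus == 8 yields 4 arenas, while an odd
 * ncpus such as 7 rounds up to 4; percpu_arena always yields ncpus arenas.
 */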
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* An odd ncpus likely means a misconfiguration. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}

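/*
 * Return the calling thread's cached arena_tdata_t for arena ind, creating the
 * per-thread cache via arena_tdata_get_hard() if it has not been initialized
 * yet.  If the cache is too small for ind, refresh it only when
 * refresh_if_missing is set; otherwise return NULL.
 */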
static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return arena_tdata_get_hard(tsd, ind);
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * Either ind is invalid, the cached array is stale (too
		 * small), or the tdata has yet to be initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing) {
		return tdata;
	}
	return arena_tdata_get_hard(tsd, ind);
}

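/*
 * Return the arena at index ind, or NULL if it has not been created yet and
 * init_if_missing is false.  When init_if_missing is true, a missing arena is
 * initialized with the default extent hooks.
 */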
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind,
			    (extent_hooks_t *)&extent_hooks_default);
		}
	}
	return ret;
}

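/*
 * Return the decay ticker embedded in the thread's tdata for arena ind, or
 * NULL if the tdata cannot be obtained.
 */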
static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL)) {
		return NULL;
	}
	return &tdata->decay_ticker;
}

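/* Return the thread cache bin for small size class binind. */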
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < SC_NBINS);
	return &tcache->bins_small[binind];
}

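/* Return the thread cache bin for large size class binind. */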
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= SC_NBINS && binind < nhbins);
	return &tcache->bins_large[binind - SC_NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * The thread-specific auto tcache might be unavailable either 1)
	 * during tcache initialization, or 2) when disabled through the
	 * thread.tcache.enabled mallctl or config options.  This check covers
	 * both cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		assert(tsd_tcachep_get(tsd)->arena == NULL ||
		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
		    NULL);
		return true;
	}

	return false;
}

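/* Return the thread's tcache, or NULL if it is unavailable (see above). */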
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}

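/*
 * Bump the reentrancy level before calling out to code that may reenter the
 * allocator (e.g. user-installed extent hooks), and force the tsd slow path so
 * reentrant allocations are handled safely.
 */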
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context. Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */
		tsd_slow_update(tsd);
		assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
	}
}

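/*
 * Drop the reentrancy level; once it reaches zero the tsd fast path may be
 * restored via tsd_slow_update().
 */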
static inline void
post_reentrancy(tsd_t *tsd) {
	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
	assert(*reentrancy_level > 0);
	if (--*reentrancy_level == 0) {
		tsd_slow_update(tsd);
	}
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */