#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"

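/*
 * Return the id of the CPU the calling thread is currently running on, using
 * the fastest mechanism available on this platform.  Only meaningful when
 * percpu arenas are supported.
 */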
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(_WIN32)
	return GetCurrentProcessorNumber();
#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#elif defined(HAVE_RDTSCP)
	/*
	 * rdtscp loads IA32_TSC_AUX into %ecx; Linux encodes the CPU number in
	 * its low 12 bits.
	 */
	unsigned int ax, cx, dx;
	asm volatile("rdtscp" : "=a" (ax), "=d" (dx), "=c" (cx) ::);
	return (malloc_cpuid_t)(cx & 0xfff);
#elif defined(__aarch64__) && defined(__APPLE__)
	/* Other OSes most likely use tpidr_el0 instead. */
	uintptr_t c;
	asm volatile("mrs %x0, tpidrro_el0" : "=r" (c) :: "memory");
	return (malloc_cpuid_t)(c & ((1 << 3) - 1));
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) ||
	    ((unsigned)cpuid < ncpus / 2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* This likely means a misconfig. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}

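/*
 * Look up the arena at index ind.  If it has not been created yet, either
 * initialize it with the default config (when init_if_missing is true) or
 * return NULL.
 */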
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind, &arena_config_default);
		}
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * The thread-specific auto tcache might be unavailable because: 1) it
	 * is still being initialized, or 2) it has been disabled through the
	 * thread.tcache.enabled mallctl or config options.  This check covers
	 * all cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
			tcache_assert_initialized(tsd_tcachep_get(tsd));
		}
		return true;
	}

	return false;
}

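/* Return the thread's auto tcache, or NULL if it is not available. */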
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}

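/*
 * Return the thread's tcache_slow_t (the infrequently accessed tcache state),
 * or NULL if the tcache is not available.
 */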
JEMALLOC_ALWAYS_INLINE tcache_slow_t *
tcache_slow_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcache_slowp_get(tsd);
}

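/*
 * Raise the thread's reentrancy level before calling into code that may
 * reenter the allocator; pair with post_reentrancy() once that call returns.
 */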
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context.  Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
	tsd_pre_reentrancy_raw(tsd);
}

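/* Mark the end of a reentrant section started by pre_reentrancy(). */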
static inline void
post_reentrancy(tsd_t *tsd) {
	tsd_post_reentrancy_raw(tsd);
}

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */