#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H

#include "jemalloc/internal/extent.h"

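/*
 * Rebind the calling thread to the arena matching its current CPU: migrate
 * the thread's arena association and, if a tcache is active, reassociate it
 * with the new arena. Requires percpu arena support (have_percpu_arena).
 */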
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	assert(have_percpu_arena);
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldarena, newarena);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
			    tcache, newarena);
		}
	}
}

/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
	arena_t *ret;

	if (arena != NULL) {
		return arena;
	}

	/* During reentrancy, arena 0 is the safest bet. */
	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
		return arena_get(tsd_tsdn(tsd), 0, true);
	}

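	/*
	 * Common case: use the arena already bound to this thread. Internal
	 * allocations track their arena separately (iarena).
	 */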
	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL)) {
		ret = arena_choose_hard(tsd, internal);
		assert(ret);
		if (tcache_available(tsd)) {
			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
			tcache_t *tcache = tsd_tcachep_get(tsd);
			if (tcache_slow->arena != NULL) {
				/* See comments in tsd_tcache_data_init(). */
				assert(tcache_slow->arena ==
				    arena_get(tsd_tsdn(tsd), 0, false));
				if (tcache_slow->arena != ret) {
					tcache_arena_reassociate(tsd_tsdn(tsd),
					    tcache_slow, tcache, ret);
				}
			} else {
				tcache_arena_associate(tsd_tsdn(tsd),
				    tcache_slow, tcache, ret);
			}
		}
	}

	/*
	 * Note that for percpu arena, if the current arena is outside of the
	 * auto percpu arena range (i.e. the thread is assigned to a manually
	 * managed arena), then percpu arena is skipped.
	 */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
	    !internal && (arena_ind_get(ret) <
	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
	    tsd_tsdn(tsd))) {
		unsigned ind = percpu_arena_choose();
		if (arena_ind_get(ret) != ind) {
			percpu_arena_update(tsd, ind);
			ret = tsd_arena_get(tsd);
		}
		ret->last_thd = tsd_tsdn(tsd);
	}

	return ret;
}

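/* Choose an arena for application (non-internal) allocations. */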
static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, false);
}

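/* Choose an arena for jemalloc-internal (metadata) allocations. */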
static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, true);
}

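/*
 * Whether the arena is automatically managed: auto arena indices lie below
 * manual_arena_base, the index of the first manually created arena.
 */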
static inline bool
arena_is_auto(arena_t *arena) {
	assert(narenas_auto > 0);

	return (arena_ind_get(arena) < manual_arena_base);
}

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */