#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
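
/* Return the index of the given arena, as recorded in its base allocator. */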
static inline unsigned
arena_ind_get(const arena_t *arena) {
	return base_ind_get(arena->base);
}
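
/*
 * Track internal (metadata) allocation, in bytes.  The counter is
 * statistics-only, so relaxed atomics suffice.
 */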
static inline void
arena_internal_add(arena_t *arena, size_t size) {
	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
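
/* Inverse of arena_internal_add(); called when internal metadata is freed. */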
static inline void
arena_internal_sub(arena_t *arena, size_t size) {
	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
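
/* Read the current internal allocation count (an unsynchronized snapshot). */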
static inline size_t
arena_internal_get(arena_t *arena) {
	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
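
/*
 * Accumulate bytes toward the interval-based profile dump trigger.  Returns
 * true if prof_interval bytes have accumulated since the last dump and the
 * caller should therefore trigger one; returns false when heap profiling is
 * inactive or no dump is due.
 */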
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
	cassert(config_prof);

	if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
		return false;
	}

	return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
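
/*
 * Rebind the calling thread to the arena corresponding to the CPU it is
 * currently running on, and reassociate its tcache (if any) accordingly.
 * No-op if the thread is already bound to that arena.
 */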
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	assert(have_percpu_arena);
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
			    newarena);
		}
	}
}
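
/*
 * Usage sketch (hypothetical caller, not part of this header): on a build
 * with per-CPU arenas enabled, a fast-path hook could rebind a thread after
 * the scheduler migrates it.  get_current_cpu() below stands in for
 * whatever CPU-query primitive is available (e.g. sched_getcpu() on Linux):
 *
 *	if (have_percpu_arena) {
 *		unsigned cpu = (unsigned)get_current_cpu();
 *		percpu_arena_update(tsd, cpu);
 *	}
 */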

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */