#ifndef JEMALLOC_INTERNAL_INLINES_C_H
#define JEMALLOC_INTERNAL_INLINES_C_H

#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"

/*
 * Translating the names of the 'i' functions:
 *   Abbreviations used in the first part of the function name (before
 *   alloc/dalloc) describe what that function accomplishes:
 *     a: arena (query)
 *     s: size (query, or sized deallocation)
 *     e: extent (query)
 *     p: aligned (allocates)
 *     vs: size (query, without knowing that the pointer is into the heap)
 *     r: rallocx implementation
 *     x: xallocx implementation
 *   Abbreviations used in the second part of the function name (after
 *   alloc/dalloc) describe the arguments it takes
 *     z: whether to return zeroed memory
 *     t: accepts a tcache_t * parameter
 *     m: accepts an arena_t * parameter
 */
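
/*
 * For example (an illustrative decoding of the table above, not an API
 * addition): iallocztm() reads as i + alloc + z/t/m, i.e. an allocation
 * entry point that takes a "zero" flag, a tcache_t *, and an arena_t *,
 * while ipalloc() reads as i + p + alloc, an aligned allocation that
 * takes none of those extra parameters.
 */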

JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_aalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	return arena_salloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_internal, arena_t *arena, bool slow_path) {
	void *ret;

	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) ==
	    0) {
		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
		    WITNESS_RANK_CORE, 0);
	}

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
	    NULL, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_internal, arena_t *arena) {
	void *ret;

	assert(usize != 0);
	assert(usize == sz_sa2u(usize, alignment));
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_internal && likely(ret != NULL)) {
		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
	}
	return ret;
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena) {
	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd), false, NULL);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr) {
	return arena_vsalloc(tsdn, ptr);
}
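
/*
 * Unlike isalloc(), ivsalloc() may be passed a pointer that jemalloc does
 * not own (the "vs" variant in the table above): arena_vsalloc() performs
 * the rtree lookup defensively and returns 0 when ptr is not a live
 * jemalloc allocation.
 */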

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
    bool is_internal, bool slow_path) {
	assert(ptr != NULL);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	if (config_stats && is_internal) {
		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
	}
	if (!is_internal && !tsdn_null(tsdn) &&
	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
		assert(tcache == NULL);
	}
	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr) {
	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    hook_ralloc_args_t *hook_args) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);
	void *p;
	size_t usize, copysize;

	usize = sz_sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}
	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		return NULL;
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return p;
}

/*
 * is_realloc threads through the knowledge of whether or not this call comes
 * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
 * correct entry point into any hooks.
 * Note that these functions are all force-inlined, so no actual bool gets
 * passed around anywhere.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
{
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
		    zero, tcache, arena, hook_args);
	}

	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
	    tcache, hook_args);
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, hook_ralloc_args_t *hook_args) {
	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd), NULL, hook_args);
}
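
/*
 * A hedged sketch of the hook plumbing (the struct layout follows hook.h;
 * the calling code shown is an assumption modeled on the je_realloc path,
 * not copied from it): the entry point records whether the request came
 * from realloc or rallocx, plus the raw user arguments, so that
 * iralloct()/iralloct_realign() can invoke the matching hooks.
 *
 *	hook_ralloc_args_t hook_args = {true,
 *	    {(uintptr_t)ptr, (uintptr_t)size, 0, 0}};
 *	void *ret = iralloc(tsd, ptr, old_usize, size, 0, false, &hook_args);
 */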

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, size_t *newsize) {
	assert(ptr != NULL);
	assert(size != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		*newsize = oldsize;
		return true;
	}

	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
	    newsize);
}
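
/*
 * A usage sketch (the caller shown is an assumption, modeled on the
 * xallocx path): ixalloc() returns true when the object could not be
 * resized in place, in which case the caller keeps reporting the old
 * usable size.
 *
 *	size_t newsize;
 *	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
 *	    &newsize)) {
 *		return old_usize;
 *	}
 *	return newsize;
 */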

JEMALLOC_ALWAYS_INLINE int
iget_defrag_hint(tsdn_t *tsdn, void *ptr) {
	int defrag = 0;
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind;
	bool is_slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &is_slab);
	if (likely(is_slab)) {
		/* Small allocation. */
		extent_t *slab = iealloc(tsdn, ptr);
		arena_t *arena = extent_arena_get(slab);
		szind_t binind = extent_szind_get(slab);
		unsigned binshard = extent_binshard_get(slab);
		bin_t *bin = &arena->bins[binind].bin_shards[binshard];
		malloc_mutex_lock(tsdn, &bin->lock);
		/*
		 * Don't bother moving allocations from the slab currently
		 * used for new allocations.
		 */
		if (slab != bin->slabcur) {
			int free_in_slab = extent_nfree_get(slab);
			if (free_in_slab) {
				const bin_info_t *bin_info = &bin_infos[binind];
				/*
				 * Find the number of non-full slabs and the
				 * number of regs in them.
				 */
				unsigned long curslabs = 0;
				size_t curregs = 0;
				/* Run on all bin shards (usually just one). */
				for (uint32_t i = 0; i < bin_info->n_shards; i++) {
					bin_t *bb = &arena->bins[binind].bin_shards[i];
					curslabs += bb->stats.nonfull_slabs;
					/*
					 * Deduct the regs in full slabs; they
					 * are not part of the candidate set.
					 */
					unsigned long full_slabs = bb->stats.curslabs -
					    bb->stats.nonfull_slabs;
					curregs += bb->stats.curregs -
					    full_slabs * bin_info->nregs;
					if (bb->slabcur) {
						/*
						 * Remove slabcur from the
						 * overall utilization (it is
						 * not a candidate to move
						 * from).
						 */
						curregs -= bin_info->nregs -
						    extent_nfree_get(bb->slabcur);
						curslabs -= 1;
					}
				}
				/*
				 * Compare the utilization ratio of the slab in
				 * question to the average among non-full
				 * slabs.  To avoid precision loss in division,
				 * we do that by extrapolating the usage of the
				 * slab as if all slabs had the same usage.  If
				 * this slab is less used than the average, we
				 * prefer to move the data to hopefully more
				 * used slabs.  To avoid stagnation when all
				 * slabs have the same utilization, we give an
				 * additional 12.5% weight to the decision to
				 * defrag.
				 */
				defrag = (bin_info->nregs - free_in_slab) *
				    curslabs <= curregs + curregs / 8;
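				/*
				 * Worked example with hypothetical numbers:
				 * nregs == 128 and free_in_slab == 100, so
				 * this slab holds 28 live regs; with
				 * curslabs == 4 and curregs == 200, the
				 * average non-full slab holds 50.  Then
				 * 28 * 4 == 112 <= 200 + 200/8 == 225, so
				 * this under-utilized slab is flagged for
				 * defrag.
				 */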
			}
		}
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
	return defrag;
}

#endif /* JEMALLOC_INTERNAL_INLINES_C_H */