1 | #include "jemalloc/internal/jemalloc_preamble.h" |
2 | #include "jemalloc/internal/jemalloc_internal_includes.h" |
3 | |
4 | #include "jemalloc/internal/assert.h" |
5 | #include "jemalloc/internal/decay.h" |
6 | #include "jemalloc/internal/ehooks.h" |
7 | #include "jemalloc/internal/extent_dss.h" |
8 | #include "jemalloc/internal/extent_mmap.h" |
9 | #include "jemalloc/internal/san.h" |
10 | #include "jemalloc/internal/mutex.h" |
11 | #include "jemalloc/internal/rtree.h" |
12 | #include "jemalloc/internal/safety_check.h" |
13 | #include "jemalloc/internal/util.h" |
14 | |
15 | JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS |
16 | |
17 | /******************************************************************************/ |
18 | /* Data. */ |
19 | |
20 | /* |
 * Define names for both uninitialized and initialized phases, so that
22 | * options and mallctl processing are straightforward. |
23 | */ |
24 | const char *percpu_arena_mode_names[] = { |
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
30 | }; |
31 | percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; |
32 | |
33 | ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; |
34 | ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; |
35 | |
36 | static atomic_zd_t dirty_decay_ms_default; |
37 | static atomic_zd_t muzzy_decay_ms_default; |
38 | |
39 | emap_t arena_emap_global; |
40 | pa_central_t arena_pa_central_global; |
41 | |
42 | div_info_t arena_binind_div_info[SC_NBINS]; |
43 | |
44 | size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; |
45 | size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; |
46 | |
47 | uint32_t arena_bin_offsets[SC_NBINS]; |
48 | static unsigned nbins_total; |
49 | |
50 | static unsigned huge_arena_ind; |
51 | |
52 | const arena_config_t arena_config_default = { |
53 | /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks, |
54 | /* .metadata_use_hooks = */ true, |
55 | }; |
56 | |
57 | /******************************************************************************/ |
58 | /* |
59 | * Function prototypes for static functions that are referenced prior to |
60 | * definition. |
61 | */ |
62 | |
63 | static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, |
64 | bool is_background_thread, bool all); |
65 | static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, |
66 | bin_t *bin); |
static void arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena,
    decay_t *decay, size_t npages_new);
70 | |
71 | /******************************************************************************/ |
72 | |
73 | void |
74 | arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, |
75 | const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, |
76 | size_t *nactive, size_t *ndirty, size_t *nmuzzy) { |
77 | *nthreads += arena_nthreads_get(arena, false); |
78 | *dss = dss_prec_names[arena_dss_prec_get(arena)]; |
79 | *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty); |
80 | *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy); |
81 | pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy); |
82 | } |
83 | |
84 | void |
85 | arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, |
86 | const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, |
87 | size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, |
88 | bin_stats_data_t *bstats, arena_stats_large_t *lstats, |
89 | pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) { |
90 | cassert(config_stats); |
91 | |
92 | arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, |
93 | muzzy_decay_ms, nactive, ndirty, nmuzzy); |
94 | |
95 | size_t base_allocated, base_resident, base_mapped, metadata_thp; |
96 | base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, |
97 | &base_mapped, &metadata_thp); |
98 | size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac); |
99 | astats->mapped += base_mapped + pac_mapped_sz; |
100 | astats->resident += base_resident; |
101 | |
102 | LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); |
103 | |
104 | astats->base += base_allocated; |
105 | atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena)); |
106 | astats->metadata_thp += metadata_thp; |
107 | |
108 | for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { |
		/*
		 * ndalloc should be read before nmalloc, since otherwise it is
		 * possible for ndalloc to be incremented, and the following can
		 * become true: ndalloc > nmalloc.
		 */
112 | uint64_t ndalloc = locked_read_u64(tsdn, |
113 | LOCKEDINT_MTX(arena->stats.mtx), |
114 | &arena->stats.lstats[i].ndalloc); |
115 | locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc); |
116 | astats->ndalloc_large += ndalloc; |
117 | |
118 | uint64_t nmalloc = locked_read_u64(tsdn, |
119 | LOCKEDINT_MTX(arena->stats.mtx), |
120 | &arena->stats.lstats[i].nmalloc); |
121 | locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc); |
122 | astats->nmalloc_large += nmalloc; |
123 | |
124 | uint64_t nrequests = locked_read_u64(tsdn, |
125 | LOCKEDINT_MTX(arena->stats.mtx), |
126 | &arena->stats.lstats[i].nrequests); |
127 | locked_inc_u64_unsynchronized(&lstats[i].nrequests, |
128 | nmalloc + nrequests); |
129 | astats->nrequests_large += nmalloc + nrequests; |
130 | |
131 | /* nfill == nmalloc for large currently. */ |
132 | locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc); |
133 | astats->nfills_large += nmalloc; |
134 | |
135 | uint64_t nflush = locked_read_u64(tsdn, |
136 | LOCKEDINT_MTX(arena->stats.mtx), |
137 | &arena->stats.lstats[i].nflushes); |
138 | locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush); |
139 | astats->nflushes_large += nflush; |
140 | |
141 | assert(nmalloc >= ndalloc); |
142 | assert(nmalloc - ndalloc <= SIZE_T_MAX); |
143 | size_t curlextents = (size_t)(nmalloc - ndalloc); |
144 | lstats[i].curlextents += curlextents; |
145 | astats->allocated_large += |
146 | curlextents * sz_index2size(SC_NBINS + i); |
147 | } |
148 | |
149 | pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats, |
150 | estats, hpastats, secstats, &astats->resident); |
151 | |
152 | LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); |
153 | |
154 | /* Currently cached bytes and sanitizer-stashed bytes in tcache. */ |
155 | astats->tcache_bytes = 0; |
156 | astats->tcache_stashed_bytes = 0; |
157 | malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); |
158 | cache_bin_array_descriptor_t *descriptor; |
159 | ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { |
160 | for (szind_t i = 0; i < nhbins; i++) { |
161 | cache_bin_t *cache_bin = &descriptor->bins[i]; |
162 | cache_bin_sz_t ncached, nstashed; |
163 | cache_bin_nitems_get_remote(cache_bin, |
164 | &tcache_bin_info[i], &ncached, &nstashed); |
165 | |
166 | astats->tcache_bytes += ncached * sz_index2size(i); |
167 | astats->tcache_stashed_bytes += nstashed * |
168 | sz_index2size(i); |
169 | } |
170 | } |
171 | malloc_mutex_prof_read(tsdn, |
172 | &astats->mutex_prof_data[arena_prof_mutex_tcache_list], |
173 | &arena->tcache_ql_mtx); |
174 | malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); |
175 | |
176 | #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ |
177 | malloc_mutex_lock(tsdn, &arena->mtx); \ |
178 | malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ |
179 | &arena->mtx); \ |
180 | malloc_mutex_unlock(tsdn, &arena->mtx); |
181 | |
182 | /* Gather per arena mutex profiling data. */ |
183 | READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); |
184 | READ_ARENA_MUTEX_PROF_DATA(base->mtx, |
185 | arena_prof_mutex_base); |
186 | #undef READ_ARENA_MUTEX_PROF_DATA |
187 | pa_shard_mtx_stats_read(tsdn, &arena->pa_shard, |
188 | astats->mutex_prof_data); |
189 | |
190 | nstime_copy(&astats->uptime, &arena->create_time); |
191 | nstime_update(&astats->uptime); |
192 | nstime_subtract(&astats->uptime, &arena->create_time); |
193 | |
194 | for (szind_t i = 0; i < SC_NBINS; i++) { |
195 | for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { |
196 | bin_stats_merge(tsdn, &bstats[i], |
197 | arena_get_bin(arena, i, j)); |
198 | } |
199 | } |
200 | } |
201 | |
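/*
 * If the background thread is enabled but sleeping indefinitely, application
 * threads nudge it from here so that newly generated deferred work (e.g.
 * dirty pages awaiting purge) is not left pending indefinitely.
 */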
202 | static void |
203 | arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, |
204 | bool is_background_thread) { |
205 | if (!background_thread_enabled() || is_background_thread) { |
206 | return; |
207 | } |
208 | background_thread_info_t *info = |
209 | arena_background_thread_info_get(arena); |
210 | if (background_thread_indefinite_sleep(info)) { |
211 | arena_maybe_do_deferred_work(tsdn, arena, |
212 | &arena->pa_shard.pac.decay_dirty, 0); |
213 | } |
214 | } |
215 | |
216 | /* |
217 | * React to deferred work generated by a PAI function. |
218 | */ |
void
arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
220 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), |
221 | WITNESS_RANK_CORE, 0); |
222 | |
223 | if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) { |
224 | arena_decay_dirty(tsdn, arena, false, true); |
225 | } |
226 | arena_background_thread_inactivity_check(tsdn, arena, false); |
227 | } |
228 | |
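/*
 * Allocate a single region from a non-full slab: find a free bit in the slab
 * bitmap and translate its index into a region address.
 */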
229 | static void * |
230 | arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) { |
231 | void *ret; |
232 | slab_data_t *slab_data = edata_slab_data_get(slab); |
233 | size_t regind; |
234 | |
235 | assert(edata_nfree_get(slab) > 0); |
236 | assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); |
237 | |
238 | regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); |
239 | ret = (void *)((uintptr_t)edata_addr_get(slab) + |
240 | (uintptr_t)(bin_info->reg_size * regind)); |
241 | edata_nfree_dec(slab); |
242 | return ret; |
243 | } |
244 | |
245 | static void |
246 | arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info, |
247 | unsigned cnt, void** ptrs) { |
248 | slab_data_t *slab_data = edata_slab_data_get(slab); |
249 | |
250 | assert(edata_nfree_get(slab) >= cnt); |
251 | assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); |
252 | |
253 | #if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) |
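	/* No fast popcount (or tree bitmaps): take regions one at a time. */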
254 | for (unsigned i = 0; i < cnt; i++) { |
255 | size_t regind = bitmap_sfu(slab_data->bitmap, |
256 | &bin_info->bitmap_info); |
257 | *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) + |
258 | (uintptr_t)(bin_info->reg_size * regind)); |
259 | } |
260 | #else |
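	/*
	 * Scan the bitmap one group (word) at a time: copy the group into a
	 * local, peel regions off it with cfs_lu(), and write the group back
	 * once enough regions have been taken from it.
	 */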
261 | unsigned group = 0; |
262 | bitmap_t g = slab_data->bitmap[group]; |
263 | unsigned i = 0; |
264 | while (i < cnt) { |
265 | while (g == 0) { |
266 | g = slab_data->bitmap[++group]; |
267 | } |
268 | size_t shift = group << LG_BITMAP_GROUP_NBITS; |
269 | size_t pop = popcount_lu(g); |
270 | if (pop > (cnt - i)) { |
271 | pop = cnt - i; |
272 | } |
273 | |
274 | /* |
275 | * Load from memory locations only once, outside the |
276 | * hot loop below. |
277 | */ |
278 | uintptr_t base = (uintptr_t)edata_addr_get(slab); |
279 | uintptr_t regsize = (uintptr_t)bin_info->reg_size; |
280 | while (pop--) { |
281 | size_t bit = cfs_lu(&g); |
282 | size_t regind = shift + bit; |
283 | *(ptrs + i) = (void *)(base + regsize * regind); |
284 | |
285 | i++; |
286 | } |
287 | slab_data->bitmap[group] = g; |
288 | } |
289 | #endif |
290 | edata_nfree_sub(slab, cnt); |
291 | } |
292 | |
293 | static void |
294 | arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { |
295 | szind_t index, hindex; |
296 | |
297 | cassert(config_stats); |
298 | |
299 | if (usize < SC_LARGE_MINCLASS) { |
300 | usize = SC_LARGE_MINCLASS; |
301 | } |
302 | index = sz_size2index(usize); |
303 | hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; |
304 | |
305 | locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), |
306 | &arena->stats.lstats[hindex].nmalloc, 1); |
307 | } |
308 | |
309 | static void |
310 | arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { |
311 | szind_t index, hindex; |
312 | |
313 | cassert(config_stats); |
314 | |
315 | if (usize < SC_LARGE_MINCLASS) { |
316 | usize = SC_LARGE_MINCLASS; |
317 | } |
318 | index = sz_size2index(usize); |
319 | hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; |
320 | |
321 | locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx), |
322 | &arena->stats.lstats[hindex].ndalloc, 1); |
323 | } |
324 | |
325 | static void |
326 | arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, |
327 | size_t usize) { |
328 | arena_large_malloc_stats_update(tsdn, arena, usize); |
329 | arena_large_dalloc_stats_update(tsdn, arena, oldusize); |
330 | } |
331 | |
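/*
 * Allocate a large extent of usize (plus sz_large_pad) bytes directly from the
 * page allocator shard; large-allocation stats are updated under the arena
 * stats mutex.
 */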
332 | edata_t * |
333 | arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, |
334 | size_t alignment, bool zero) { |
335 | bool deferred_work_generated = false; |
336 | szind_t szind = sz_size2index(usize); |
337 | size_t esize = usize + sz_large_pad; |
338 | |
339 | bool guarded = san_large_extent_decide_guard(tsdn, |
340 | arena_get_ehooks(arena), esize, alignment); |
341 | edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment, |
342 | /* slab */ false, szind, zero, guarded, &deferred_work_generated); |
343 | assert(deferred_work_generated == false); |
344 | |
345 | if (edata != NULL) { |
346 | if (config_stats) { |
347 | LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); |
348 | arena_large_malloc_stats_update(tsdn, arena, usize); |
349 | LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); |
350 | } |
351 | } |
352 | |
353 | if (edata != NULL && sz_large_pad != 0) { |
354 | arena_cache_oblivious_randomize(tsdn, arena, edata, alignment); |
355 | } |
356 | |
357 | return edata; |
358 | } |
359 | |
360 | void |
361 | arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) { |
362 | if (config_stats) { |
363 | LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); |
364 | arena_large_dalloc_stats_update(tsdn, arena, |
365 | edata_usize_get(edata)); |
366 | LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); |
367 | } |
368 | } |
369 | |
370 | void |
371 | arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata, |
372 | size_t oldusize) { |
373 | size_t usize = edata_usize_get(edata); |
374 | |
375 | if (config_stats) { |
376 | LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); |
377 | arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); |
378 | LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); |
379 | } |
380 | } |
381 | |
382 | void |
383 | arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata, |
384 | size_t oldusize) { |
385 | size_t usize = edata_usize_get(edata); |
386 | |
387 | if (config_stats) { |
388 | LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx); |
389 | arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); |
390 | LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx); |
391 | } |
392 | } |
393 | |
394 | /* |
 * In situations where we're not forcing a decay (i.e. the user has not
 * specifically requested it), should we purge ourselves, or wait for the
 * background thread to get to it?
398 | */ |
399 | static pac_purge_eagerness_t |
400 | arena_decide_unforced_purge_eagerness(bool is_background_thread) { |
401 | if (is_background_thread) { |
402 | return PAC_PURGE_ALWAYS; |
403 | } else if (!is_background_thread && background_thread_enabled()) { |
404 | return PAC_PURGE_NEVER; |
405 | } else { |
406 | return PAC_PURGE_ON_EPOCH_ADVANCE; |
407 | } |
408 | } |
409 | |
410 | bool |
411 | arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state, |
412 | ssize_t decay_ms) { |
413 | pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness( |
414 | /* is_background_thread */ false); |
415 | return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms, |
416 | eagerness); |
417 | } |
418 | |
419 | ssize_t |
420 | arena_decay_ms_get(arena_t *arena, extent_state_t state) { |
421 | return pa_decay_ms_get(&arena->pa_shard, state); |
422 | } |
423 | |
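/*
 * Run decay-driven purging for one ecache. With "all" set, everything is
 * purged under the decay mutex; otherwise only one thread at a time attempts
 * the epoch-based purge (via trylock), and an epoch advance may hand the newly
 * accumulated backlog off to the background thread.
 */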
424 | static bool |
425 | arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay, |
426 | pac_decay_stats_t *decay_stats, ecache_t *ecache, |
427 | bool is_background_thread, bool all) { |
428 | if (all) { |
429 | malloc_mutex_lock(tsdn, &decay->mtx); |
430 | pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats, |
431 | ecache, /* fully_decay */ all); |
432 | malloc_mutex_unlock(tsdn, &decay->mtx); |
433 | return false; |
434 | } |
435 | |
436 | if (malloc_mutex_trylock(tsdn, &decay->mtx)) { |
437 | /* No need to wait if another thread is in progress. */ |
438 | return true; |
439 | } |
440 | pac_purge_eagerness_t eagerness = |
441 | arena_decide_unforced_purge_eagerness(is_background_thread); |
442 | bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac, |
443 | decay, decay_stats, ecache, eagerness); |
444 | size_t npages_new; |
445 | if (epoch_advanced) { |
446 | /* Backlog is updated on epoch advance. */ |
447 | npages_new = decay_epoch_npages_delta(decay); |
448 | } |
449 | malloc_mutex_unlock(tsdn, &decay->mtx); |
450 | |
451 | if (have_background_thread && background_thread_enabled() && |
452 | epoch_advanced && !is_background_thread) { |
453 | arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new); |
454 | } |
455 | |
456 | return false; |
457 | } |
458 | |
459 | static bool |
460 | arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, |
461 | bool all) { |
462 | return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty, |
463 | &arena->pa_shard.pac.stats->decay_dirty, |
464 | &arena->pa_shard.pac.ecache_dirty, is_background_thread, all); |
465 | } |
466 | |
467 | static bool |
468 | arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, |
469 | bool all) { |
470 | if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) { |
471 | return false; |
472 | } |
473 | return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy, |
474 | &arena->pa_shard.pac.stats->decay_muzzy, |
475 | &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all); |
476 | } |
477 | |
478 | void |
479 | arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { |
480 | if (all) { |
481 | /* |
482 | * We should take a purge of "all" to mean "save as much memory |
483 | * as possible", including flushing any caches (for situations |
484 | * like thread death, or manual purge calls). |
485 | */ |
486 | sec_flush(tsdn, &arena->pa_shard.hpa_sec); |
487 | } |
488 | if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { |
489 | return; |
490 | } |
491 | arena_decay_muzzy(tsdn, arena, is_background_thread, all); |
492 | } |
493 | |
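/*
 * Estimate whether an influx of dirty pages justifies waking the background
 * thread before its scheduled wakeup: accumulate the number of pages expected
 * to need purging during the remaining sleep, and report true once that total
 * exceeds ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD.
 */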
494 | static bool |
495 | arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay, |
496 | background_thread_info_t *info, nstime_t *remaining_sleep, |
497 | size_t npages_new) { |
498 | malloc_mutex_assert_owner(tsdn, &info->mtx); |
499 | |
500 | if (malloc_mutex_trylock(tsdn, &decay->mtx)) { |
501 | return false; |
502 | } |
503 | |
504 | if (!decay_gradually(decay)) { |
505 | malloc_mutex_unlock(tsdn, &decay->mtx); |
506 | return false; |
507 | } |
508 | |
509 | nstime_init(remaining_sleep, background_thread_wakeup_time_get(info)); |
510 | if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) { |
511 | malloc_mutex_unlock(tsdn, &decay->mtx); |
512 | return false; |
513 | } |
514 | nstime_subtract(remaining_sleep, &decay->epoch); |
515 | if (npages_new > 0) { |
516 | uint64_t npurge_new = decay_npages_purge_in(decay, |
517 | remaining_sleep, npages_new); |
518 | info->npages_to_purge_new += npurge_new; |
519 | } |
520 | malloc_mutex_unlock(tsdn, &decay->mtx); |
521 | return info->npages_to_purge_new > |
522 | ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD; |
523 | } |
524 | |
525 | /* |
526 | * Check if deferred work needs to be done sooner than planned. |
527 | * For decay we might want to wake up earlier because of an influx of dirty |
 * pages. Rather than waiting for the previously estimated time, we proactively
 * purge those pages.
 * If the background thread sleeps indefinitely, always wake it up, because
 * some deferred work has been generated.
532 | */ |
533 | static void |
534 | arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay, |
535 | size_t npages_new) { |
536 | background_thread_info_t *info = arena_background_thread_info_get( |
537 | arena); |
538 | if (malloc_mutex_trylock(tsdn, &info->mtx)) { |
539 | /* |
540 | * Background thread may hold the mutex for a long period of |
541 | * time. We'd like to avoid the variance on application |
542 | * threads. So keep this non-blocking, and leave the work to a |
543 | * future epoch. |
544 | */ |
545 | return; |
546 | } |
547 | if (!background_thread_is_started(info)) { |
548 | goto label_done; |
549 | } |
550 | |
551 | nstime_t remaining_sleep; |
552 | if (background_thread_indefinite_sleep(info)) { |
553 | background_thread_wakeup_early(info, NULL); |
554 | } else if (arena_should_decay_early(tsdn, arena, decay, info, |
555 | &remaining_sleep, npages_new)) { |
556 | info->npages_to_purge_new = 0; |
557 | background_thread_wakeup_early(info, &remaining_sleep); |
558 | } |
559 | label_done: |
560 | malloc_mutex_unlock(tsdn, &info->mtx); |
561 | } |
562 | |
563 | /* Called from background threads. */ |
564 | void |
565 | arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) { |
566 | arena_decay(tsdn, arena, true, false); |
567 | pa_shard_do_deferred_work(tsdn, &arena->pa_shard); |
568 | } |
569 | |
570 | void |
571 | arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) { |
572 | bool deferred_work_generated = false; |
573 | pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated); |
574 | if (deferred_work_generated) { |
575 | arena_handle_deferred_work(tsdn, arena); |
576 | } |
577 | } |
578 | |
579 | static void |
580 | arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) { |
581 | assert(edata_nfree_get(slab) > 0); |
582 | edata_heap_insert(&bin->slabs_nonfull, slab); |
583 | if (config_stats) { |
584 | bin->stats.nonfull_slabs++; |
585 | } |
586 | } |
587 | |
588 | static void |
589 | arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) { |
590 | edata_heap_remove(&bin->slabs_nonfull, slab); |
591 | if (config_stats) { |
592 | bin->stats.nonfull_slabs--; |
593 | } |
594 | } |
595 | |
596 | static edata_t * |
597 | arena_bin_slabs_nonfull_tryget(bin_t *bin) { |
598 | edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull); |
599 | if (slab == NULL) { |
600 | return NULL; |
601 | } |
602 | if (config_stats) { |
603 | bin->stats.reslabs++; |
604 | bin->stats.nonfull_slabs--; |
605 | } |
606 | return slab; |
607 | } |
608 | |
609 | static void |
610 | arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) { |
611 | assert(edata_nfree_get(slab) == 0); |
612 | /* |
613 | * Tracking extents is required by arena_reset, which is not allowed |
614 | * for auto arenas. Bypass this step to avoid touching the edata |
	 * linkage (which often results in cache misses) for auto arenas.
616 | */ |
617 | if (arena_is_auto(arena)) { |
618 | return; |
619 | } |
620 | edata_list_active_append(&bin->slabs_full, slab); |
621 | } |
622 | |
623 | static void |
624 | arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) { |
625 | if (arena_is_auto(arena)) { |
626 | return; |
627 | } |
628 | edata_list_active_remove(&bin->slabs_full, slab); |
629 | } |
630 | |
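/*
 * Free every slab owned by a bin (slabcur, nonfull, and full), dropping and
 * re-acquiring the bin lock around each slab deallocation so that it is not
 * held while calling into the page allocator.
 */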
631 | static void |
632 | arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { |
633 | edata_t *slab; |
634 | |
635 | malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); |
636 | if (bin->slabcur != NULL) { |
637 | slab = bin->slabcur; |
638 | bin->slabcur = NULL; |
639 | malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); |
640 | arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); |
641 | malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); |
642 | } |
643 | while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) { |
644 | malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); |
645 | arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); |
646 | malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); |
647 | } |
648 | for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL; |
649 | slab = edata_list_active_first(&bin->slabs_full)) { |
650 | arena_bin_slabs_full_remove(arena, bin, slab); |
651 | malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); |
652 | arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); |
653 | malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); |
654 | } |
655 | if (config_stats) { |
656 | bin->stats.curregs = 0; |
657 | bin->stats.curslabs = 0; |
658 | } |
659 | malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); |
660 | } |
661 | |
662 | void |
663 | arena_reset(tsd_t *tsd, arena_t *arena) { |
664 | /* |
665 | * Locking in this function is unintuitive. The caller guarantees that |
666 | * no concurrent operations are happening in this arena, but there are |
667 | * still reasons that some locking is necessary: |
668 | * |
669 | * - Some of the functions in the transitive closure of calls assume |
670 | * appropriate locks are held, and in some cases these locks are |
671 | * temporarily dropped to avoid lock order reversal or deadlock due to |
672 | * reentry. |
673 | * - mallctl("epoch", ...) may concurrently refresh stats. While |
674 | * strictly speaking this is a "concurrent operation", disallowing |
675 | * stats refreshes would impose an inconvenient burden. |
676 | */ |
677 | |
678 | /* Large allocations. */ |
679 | malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); |
680 | |
681 | for (edata_t *edata = edata_list_active_first(&arena->large); |
682 | edata != NULL; edata = edata_list_active_first(&arena->large)) { |
683 | void *ptr = edata_base_get(edata); |
684 | size_t usize; |
685 | |
686 | malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); |
687 | emap_alloc_ctx_t alloc_ctx; |
688 | emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr, |
689 | &alloc_ctx); |
690 | assert(alloc_ctx.szind != SC_NSIZES); |
691 | |
692 | if (config_stats || (config_prof && opt_prof)) { |
693 | usize = sz_index2size(alloc_ctx.szind); |
694 | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); |
695 | } |
696 | /* Remove large allocation from prof sample set. */ |
697 | if (config_prof && opt_prof) { |
698 | prof_free(tsd, ptr, usize, &alloc_ctx); |
699 | } |
700 | large_dalloc(tsd_tsdn(tsd), edata); |
701 | malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); |
702 | } |
703 | malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); |
704 | |
705 | /* Bins. */ |
706 | for (unsigned i = 0; i < SC_NBINS; i++) { |
707 | for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { |
708 | arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j)); |
709 | } |
710 | } |
711 | pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard); |
712 | } |
713 | |
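/*
 * Locking and then immediately unlocking a mutex guarantees that any thread
 * that was inside the corresponding critical section when we started has
 * since exited it; arena_prepare_base_deletion() below relies on this to
 * drain pending cross-arena metadata accesses.
 */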
714 | static void |
715 | arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes, |
716 | unsigned n_mtx) { |
717 | for (unsigned i = 0; i < n_mtx; i++) { |
718 | malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]); |
719 | malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]); |
720 | } |
721 | } |
722 | |
723 | #define ARENA_DESTROY_MAX_DELAYED_MTX 32 |
724 | static void |
725 | arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx, |
726 | malloc_mutex_t **delayed_mtx, unsigned *n_delayed) { |
727 | if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) { |
728 | /* No contention. */ |
729 | malloc_mutex_unlock(tsd_tsdn(tsd), mtx); |
730 | return; |
731 | } |
732 | unsigned n = *n_delayed; |
733 | assert(n < ARENA_DESTROY_MAX_DELAYED_MTX); |
734 | /* Add another to the batch. */ |
735 | delayed_mtx[n++] = mtx; |
736 | |
737 | if (n == ARENA_DESTROY_MAX_DELAYED_MTX) { |
738 | arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n); |
739 | n = 0; |
740 | } |
741 | *n_delayed = n; |
742 | } |
743 | |
744 | static void |
745 | arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) { |
746 | /* |
747 | * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to |
748 | * check neighbor edata's state to determine eligibility. This means |
749 | * under certain conditions, the metadata from an arena can be accessed |
750 | * w/o holding any locks from that arena. In order to guarantee safe |
 * memory access, the metadata and the underlying base allocator need to
 * be kept alive until all pending accesses are done.
753 | * |
754 | * 1) with opt_retain, the arena boundary implies the is_head state |
755 | * (tracked in the rtree leaf), and the coalesce flow will stop at the |
 * head state branch. Therefore no cross-arena metadata access is
 * possible.
758 | * |
759 | * 2) w/o opt_retain, the arena id needs to be read from the edata_t, |
760 | * meaning read only cross-arena metadata access is possible. The |
761 | * coalesce attempt will stop at the arena_id mismatch, and is always |
762 | * under one of the ecache locks. To allow safe passthrough of such |
763 | * metadata accesses, the loop below will iterate through all manual |
 * arenas' ecache locks. As all the metadata from this base allocator
 * has been unlinked from the rtree, after going through all the
766 | * relevant ecache locks, it's safe to say that a) pending accesses are |
767 | * all finished, and b) no new access will be generated. |
768 | */ |
769 | if (opt_retain) { |
770 | return; |
771 | } |
772 | unsigned destroy_ind = base_ind_get(base_to_destroy); |
773 | assert(destroy_ind >= manual_arena_base); |
774 | |
775 | tsdn_t *tsdn = tsd_tsdn(tsd); |
776 | malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX]; |
777 | unsigned n_delayed = 0, total = narenas_total_get(); |
778 | for (unsigned i = 0; i < total; i++) { |
779 | if (i == destroy_ind) { |
780 | continue; |
781 | } |
782 | arena_t *arena = arena_get(tsdn, i, false); |
783 | if (arena == NULL) { |
784 | continue; |
785 | } |
786 | pac_t *pac = &arena->pa_shard.pac; |
787 | arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx, |
788 | delayed_mtx, &n_delayed); |
789 | arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx, |
790 | delayed_mtx, &n_delayed); |
791 | arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx, |
792 | delayed_mtx, &n_delayed); |
793 | } |
794 | arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed); |
795 | } |
796 | #undef ARENA_DESTROY_MAX_DELAYED_MTX |
797 | |
798 | void |
799 | arena_destroy(tsd_t *tsd, arena_t *arena) { |
800 | assert(base_ind_get(arena->base) >= narenas_auto); |
801 | assert(arena_nthreads_get(arena, false) == 0); |
802 | assert(arena_nthreads_get(arena, true) == 0); |
803 | |
804 | /* |
805 | * No allocations have occurred since arena_reset() was called. |
806 | * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached |
807 | * extents, so only retained extents may remain and it's safe to call |
808 | * pa_shard_destroy_retained. |
809 | */ |
810 | pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard); |
811 | |
812 | /* |
813 | * Remove the arena pointer from the arenas array. We rely on the fact |
814 | * that there is no way for the application to get a dirty read from the |
815 | * arenas array unless there is an inherent race in the application |
816 | * involving access of an arena being concurrently destroyed. The |
817 | * application must synchronize knowledge of the arena's validity, so as |
818 | * long as we use an atomic write to update the arenas array, the |
819 | * application will get a clean read any time after it synchronizes |
820 | * knowledge that the arena is no longer valid. |
821 | */ |
822 | arena_set(base_ind_get(arena->base), NULL); |
823 | |
824 | /* |
825 | * Destroy the base allocator, which manages all metadata ever mapped by |
 * this arena. The prepare function ensures that there are no pending
 * accesses to the metadata in this base before it is deleted.
828 | */ |
829 | arena_prepare_base_deletion(tsd, arena->base); |
830 | base_delete(tsd_tsdn(tsd), arena->base); |
831 | } |
832 | |
833 | static edata_t * |
834 | arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, |
835 | const bin_info_t *bin_info) { |
836 | bool deferred_work_generated = false; |
837 | witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), |
838 | WITNESS_RANK_CORE, 0); |
839 | |
840 | bool guarded = san_slab_extent_decide_guard(tsdn, |
841 | arena_get_ehooks(arena)); |
842 | edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size, |
843 | /* alignment */ PAGE, /* slab */ true, /* szind */ binind, |
844 | /* zero */ false, guarded, &deferred_work_generated); |
845 | |
846 | if (deferred_work_generated) { |
847 | arena_handle_deferred_work(tsdn, arena); |
848 | } |
849 | |
850 | if (slab == NULL) { |
851 | return NULL; |
852 | } |
853 | assert(edata_slab_get(slab)); |
854 | |
855 | /* Initialize slab internals. */ |
856 | slab_data_t *slab_data = edata_slab_data_get(slab); |
857 | edata_nfree_binshard_set(slab, bin_info->nregs, binshard); |
858 | bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); |
859 | |
860 | return slab; |
861 | } |
862 | |
863 | /* |
 * The _with_fresh_slab approaches below must only be attempted after the
 * _no_fresh_slab variants (i.e. through slabcur and nonfull) have failed.
866 | */ |
867 | static void |
868 | arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, |
869 | bin_t *bin, szind_t binind, edata_t *fresh_slab) { |
870 | malloc_mutex_assert_owner(tsdn, &bin->lock); |
871 | /* Only called after slabcur and nonfull both failed. */ |
872 | assert(bin->slabcur == NULL); |
873 | assert(edata_heap_first(&bin->slabs_nonfull) == NULL); |
874 | assert(fresh_slab != NULL); |
875 | |
876 | /* A new slab from arena_slab_alloc() */ |
877 | assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs); |
878 | if (config_stats) { |
879 | bin->stats.nslabs++; |
880 | bin->stats.curslabs++; |
881 | } |
882 | bin->slabcur = fresh_slab; |
883 | } |
884 | |
885 | /* Refill slabcur and then alloc using the fresh slab */ |
886 | static void * |
887 | arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, |
888 | szind_t binind, edata_t *fresh_slab) { |
889 | malloc_mutex_assert_owner(tsdn, &bin->lock); |
890 | arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind, |
891 | fresh_slab); |
892 | |
893 | return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]); |
894 | } |
895 | |
896 | static bool |
897 | arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, |
898 | bin_t *bin) { |
899 | malloc_mutex_assert_owner(tsdn, &bin->lock); |
900 | /* Only called after arena_slab_reg_alloc[_batch] failed. */ |
901 | assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0); |
902 | |
903 | if (bin->slabcur != NULL) { |
904 | arena_bin_slabs_full_insert(arena, bin, bin->slabcur); |
905 | } |
906 | |
907 | /* Look for a usable slab. */ |
908 | bin->slabcur = arena_bin_slabs_nonfull_tryget(bin); |
909 | assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0); |
910 | |
911 | return (bin->slabcur == NULL); |
912 | } |
913 | |
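/*
 * Pick this thread's shard of the requested bin (from TSD when available;
 * shard 0 otherwise, e.g. during bootstrap or when no thread context exists).
 */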
914 | bin_t * |
915 | arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind, |
916 | unsigned *binshard_p) { |
917 | unsigned binshard; |
918 | if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { |
919 | binshard = 0; |
920 | } else { |
921 | binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; |
922 | } |
923 | assert(binshard < bin_infos[binind].n_shards); |
924 | if (binshard_p != NULL) { |
925 | *binshard_p = binshard; |
926 | } |
927 | return arena_get_bin(arena, binind, binshard); |
928 | } |
929 | |
930 | void |
931 | arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena, |
932 | cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind, |
933 | const unsigned nfill) { |
934 | assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0); |
935 | |
936 | const bin_info_t *bin_info = &bin_infos[binind]; |
937 | |
938 | CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill); |
939 | cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs, |
940 | nfill); |
941 | /* |
942 | * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull |
943 | * slabs. After both are exhausted, new slabs will be allocated through |
944 | * arena_slab_alloc(). |
945 | * |
946 | * Bin lock is only taken / released right before / after the while(...) |
947 | * refill loop, with new slab allocation (which has its own locking) |
948 | * kept outside of the loop. This setup facilitates flat combining, at |
949 | * the cost of the nested loop (through goto label_refill). |
950 | * |
951 | * To optimize for cases with contention and limited resources |
952 | * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration |
953 | * gets one chance of slab_alloc, and a retry of bin local resources |
954 | * after the slab allocation (regardless if slab_alloc failed, because |
955 | * the bin lock is dropped during the slab allocation). |
956 | * |
957 | * In other words, new slab allocation is allowed, as long as there was |
958 | * progress since the previous slab_alloc. This is tracked with |
959 | * made_progress below, initialized to true to jump start the first |
960 | * iteration. |
961 | * |
962 | * In other words (again), the loop will only terminate early (i.e. stop |
963 | * with filled < nfill) after going through the three steps: a) bin |
964 | * local exhausted, b) unlock and slab_alloc returns null, c) re-lock |
965 | * and bin local fails again. |
966 | */ |
967 | bool made_progress = true; |
968 | edata_t *fresh_slab = NULL; |
969 | bool alloc_and_retry = false; |
970 | unsigned filled = 0; |
971 | unsigned binshard; |
972 | bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); |
973 | |
974 | label_refill: |
975 | malloc_mutex_lock(tsdn, &bin->lock); |
976 | |
977 | while (filled < nfill) { |
978 | /* Try batch-fill from slabcur first. */ |
979 | edata_t *slabcur = bin->slabcur; |
980 | if (slabcur != NULL && edata_nfree_get(slabcur) > 0) { |
981 | unsigned tofill = nfill - filled; |
982 | unsigned nfree = edata_nfree_get(slabcur); |
983 | unsigned cnt = tofill < nfree ? tofill : nfree; |
984 | |
985 | arena_slab_reg_alloc_batch(slabcur, bin_info, cnt, |
986 | &ptrs.ptr[filled]); |
987 | made_progress = true; |
988 | filled += cnt; |
989 | continue; |
990 | } |
991 | /* Next try refilling slabcur from nonfull slabs. */ |
992 | if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) { |
993 | assert(bin->slabcur != NULL); |
994 | continue; |
995 | } |
996 | |
997 | /* Then see if a new slab was reserved already. */ |
998 | if (fresh_slab != NULL) { |
999 | arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, |
1000 | bin, binind, fresh_slab); |
1001 | assert(bin->slabcur != NULL); |
1002 | fresh_slab = NULL; |
1003 | continue; |
1004 | } |
1005 | |
1006 | /* Try slab_alloc if made progress (or never did slab_alloc). */ |
1007 | if (made_progress) { |
1008 | assert(bin->slabcur == NULL); |
1009 | assert(fresh_slab == NULL); |
1010 | alloc_and_retry = true; |
1011 | /* Alloc a new slab then come back. */ |
1012 | break; |
1013 | } |
1014 | |
1015 | /* OOM. */ |
1016 | |
1017 | assert(fresh_slab == NULL); |
1018 | assert(!alloc_and_retry); |
1019 | break; |
1020 | } /* while (filled < nfill) loop. */ |
1021 | |
1022 | if (config_stats && !alloc_and_retry) { |
1023 | bin->stats.nmalloc += filled; |
1024 | bin->stats.nrequests += cache_bin->tstats.nrequests; |
1025 | bin->stats.curregs += filled; |
1026 | bin->stats.nfills++; |
1027 | cache_bin->tstats.nrequests = 0; |
1028 | } |
1029 | |
1030 | malloc_mutex_unlock(tsdn, &bin->lock); |
1031 | |
1032 | if (alloc_and_retry) { |
1033 | assert(fresh_slab == NULL); |
1034 | assert(filled < nfill); |
1035 | assert(made_progress); |
1036 | |
1037 | fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, |
1038 | bin_info); |
		/* fresh_slab NULL case handled in the refill loop above. */
1040 | |
1041 | alloc_and_retry = false; |
1042 | made_progress = false; |
1043 | goto label_refill; |
1044 | } |
1045 | assert(filled == nfill || (fresh_slab == NULL && !made_progress)); |
1046 | |
1047 | /* Release if allocated but not used. */ |
1048 | if (fresh_slab != NULL) { |
1049 | assert(edata_nfree_get(fresh_slab) == bin_info->nregs); |
1050 | arena_slab_dalloc(tsdn, arena, fresh_slab); |
1051 | fresh_slab = NULL; |
1052 | } |
1053 | |
1054 | cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled); |
1055 | arena_decay_tick(tsdn, arena); |
1056 | } |
1057 | |
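/*
 * Fill ptrs with up to nfill regions of size class binind, carved exclusively
 * out of freshly allocated slabs (bin-local slabs are not consulted). Returns
 * the number of regions actually filled, which may fall short on OOM.
 */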
1058 | size_t |
1059 | arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind, |
1060 | void **ptrs, size_t nfill, bool zero) { |
1061 | assert(binind < SC_NBINS); |
1062 | const bin_info_t *bin_info = &bin_infos[binind]; |
1063 | const size_t nregs = bin_info->nregs; |
1064 | assert(nregs > 0); |
1065 | const size_t usize = bin_info->reg_size; |
1066 | |
1067 | const bool manual_arena = !arena_is_auto(arena); |
1068 | unsigned binshard; |
1069 | bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); |
1070 | |
1071 | size_t nslab = 0; |
1072 | size_t filled = 0; |
1073 | edata_t *slab = NULL; |
1074 | edata_list_active_t fulls; |
1075 | edata_list_active_init(&fulls); |
1076 | |
1077 | while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind, |
1078 | binshard, bin_info)) != NULL) { |
1079 | assert((size_t)edata_nfree_get(slab) == nregs); |
1080 | ++nslab; |
1081 | size_t batch = nfill - filled; |
1082 | if (batch > nregs) { |
1083 | batch = nregs; |
1084 | } |
1085 | assert(batch > 0); |
1086 | arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch, |
1087 | &ptrs[filled]); |
1088 | assert(edata_addr_get(slab) == ptrs[filled]); |
1089 | if (zero) { |
1090 | memset(ptrs[filled], 0, batch * usize); |
1091 | } |
1092 | filled += batch; |
1093 | if (batch == nregs) { |
1094 | if (manual_arena) { |
1095 | edata_list_active_append(&fulls, slab); |
1096 | } |
1097 | slab = NULL; |
1098 | } |
1099 | } |
1100 | |
1101 | malloc_mutex_lock(tsdn, &bin->lock); |
1102 | /* |
	 * Only the last slab can be non-full, and the last slab is non-full
	 * iff slab != NULL.
1105 | */ |
1106 | if (slab != NULL) { |
1107 | arena_bin_lower_slab(tsdn, arena, slab, bin); |
1108 | } |
1109 | if (manual_arena) { |
1110 | edata_list_active_concat(&bin->slabs_full, &fulls); |
1111 | } |
1112 | assert(edata_list_active_empty(&fulls)); |
1113 | if (config_stats) { |
1114 | bin->stats.nslabs += nslab; |
1115 | bin->stats.curslabs += nslab; |
1116 | bin->stats.nmalloc += filled; |
1117 | bin->stats.nrequests += filled; |
1118 | bin->stats.curregs += filled; |
1119 | } |
1120 | malloc_mutex_unlock(tsdn, &bin->lock); |
1121 | |
1122 | arena_decay_tick(tsdn, arena); |
1123 | return filled; |
1124 | } |
1125 | |
1126 | /* |
1127 | * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill |
1128 | * bin->slabcur if necessary. |
1129 | */ |
1130 | static void * |
1131 | arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin, |
1132 | szind_t binind) { |
1133 | malloc_mutex_assert_owner(tsdn, &bin->lock); |
1134 | if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) { |
1135 | if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) { |
1136 | return NULL; |
1137 | } |
1138 | } |
1139 | |
1140 | assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0); |
1141 | return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]); |
1142 | } |
1143 | |
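/*
 * Slow-path small allocation (e.g. on a tcache miss): take the bin lock and
 * try the bin-local slabs first; only if that fails is a fresh slab allocated
 * with the lock dropped, after which the bin-local path is retried.
 */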
1144 | static void * |
1145 | arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { |
1146 | assert(binind < SC_NBINS); |
1147 | const bin_info_t *bin_info = &bin_infos[binind]; |
1148 | size_t usize = sz_index2size(binind); |
1149 | unsigned binshard; |
1150 | bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard); |
1151 | |
1152 | malloc_mutex_lock(tsdn, &bin->lock); |
1153 | edata_t *fresh_slab = NULL; |
1154 | void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); |
1155 | if (ret == NULL) { |
1156 | malloc_mutex_unlock(tsdn, &bin->lock); |
1157 | /******************************/ |
1158 | fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard, |
1159 | bin_info); |
1160 | /********************************/ |
1161 | malloc_mutex_lock(tsdn, &bin->lock); |
1162 | /* Retry since the lock was dropped. */ |
1163 | ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind); |
1164 | if (ret == NULL) { |
1165 | if (fresh_slab == NULL) { |
1166 | /* OOM */ |
1167 | malloc_mutex_unlock(tsdn, &bin->lock); |
1168 | return NULL; |
1169 | } |
1170 | ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin, |
1171 | binind, fresh_slab); |
1172 | fresh_slab = NULL; |
1173 | } |
1174 | } |
1175 | if (config_stats) { |
1176 | bin->stats.nmalloc++; |
1177 | bin->stats.nrequests++; |
1178 | bin->stats.curregs++; |
1179 | } |
1180 | malloc_mutex_unlock(tsdn, &bin->lock); |
1181 | |
1182 | if (fresh_slab != NULL) { |
1183 | arena_slab_dalloc(tsdn, arena, fresh_slab); |
1184 | } |
1185 | if (zero) { |
1186 | memset(ret, 0, usize); |
1187 | } |
1188 | arena_decay_tick(tsdn, arena); |
1189 | |
1190 | return ret; |
1191 | } |
1192 | |
1193 | void * |
1194 | arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, |
1195 | bool zero) { |
1196 | assert(!tsdn_null(tsdn) || arena != NULL); |
1197 | |
1198 | if (likely(!tsdn_null(tsdn))) { |
1199 | arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size); |
1200 | } |
1201 | if (unlikely(arena == NULL)) { |
1202 | return NULL; |
1203 | } |
1204 | |
1205 | if (likely(size <= SC_SMALL_MAXCLASS)) { |
1206 | return arena_malloc_small(tsdn, arena, ind, zero); |
1207 | } |
1208 | return large_malloc(tsdn, arena, sz_index2size(ind), zero); |
1209 | } |
1210 | |
1211 | void * |
1212 | arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, |
1213 | bool zero, tcache_t *tcache) { |
1214 | void *ret; |
1215 | |
1216 | if (usize <= SC_SMALL_MAXCLASS) { |
1217 | /* Small; alignment doesn't require special slab placement. */ |
1218 | |
1219 | /* usize should be a result of sz_sa2u() */ |
1220 | assert((usize & (alignment - 1)) == 0); |
1221 | |
1222 | /* |
1223 | * Small usize can't come from an alignment larger than a page. |
1224 | */ |
1225 | assert(alignment <= PAGE); |
1226 | |
1227 | ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), |
1228 | zero, tcache, true); |
1229 | } else { |
1230 | if (likely(alignment <= CACHELINE)) { |
1231 | ret = large_malloc(tsdn, arena, usize, zero); |
1232 | } else { |
1233 | ret = large_palloc(tsdn, arena, usize, alignment, zero); |
1234 | } |
1235 | } |
1236 | return ret; |
1237 | } |
1238 | |
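/*
 * A sampled small allocation is backed by an SC_LARGE_MINCLASS extent.
 * Promotion records the true (small) usize in the emap so that isalloc()
 * reports it; arena_prof_demote() below restores the large size class before
 * the backing extent is freed.
 */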
1239 | void |
1240 | arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { |
1241 | cassert(config_prof); |
1242 | assert(ptr != NULL); |
1243 | assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); |
1244 | assert(usize <= SC_SMALL_MAXCLASS); |
1245 | |
1246 | if (config_opt_safety_checks) { |
1247 | safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); |
1248 | } |
1249 | |
1250 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); |
1251 | |
1252 | szind_t szind = sz_size2index(usize); |
1253 | edata_szind_set(edata, szind); |
1254 | emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false); |
1255 | |
1256 | assert(isalloc(tsdn, ptr) == usize); |
1257 | } |
1258 | |
1259 | static size_t |
1260 | arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) { |
1261 | cassert(config_prof); |
1262 | assert(ptr != NULL); |
1263 | |
1264 | edata_szind_set(edata, SC_NBINS); |
1265 | emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false); |
1266 | |
1267 | assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); |
1268 | |
1269 | return SC_LARGE_MINCLASS; |
1270 | } |
1271 | |
1272 | void |
1273 | arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, |
1274 | bool slow_path) { |
1275 | cassert(config_prof); |
1276 | assert(opt_prof); |
1277 | |
1278 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); |
1279 | size_t usize = edata_usize_get(edata); |
1280 | size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr); |
1281 | if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { |
1282 | /* |
1283 | * Currently, we only do redzoning for small sampled |
1284 | * allocations. |
1285 | */ |
1286 | assert(bumped_usize == SC_LARGE_MINCLASS); |
1287 | safety_check_verify_redzone(ptr, usize, bumped_usize); |
1288 | } |
1289 | if (bumped_usize <= tcache_maxclass && tcache != NULL) { |
1290 | tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, |
1291 | sz_size2index(bumped_usize), slow_path); |
1292 | } else { |
1293 | large_dalloc(tsdn, edata); |
1294 | } |
1295 | } |
1296 | |
1297 | static void |
1298 | arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) { |
1299 | /* Dissociate slab from bin. */ |
1300 | if (slab == bin->slabcur) { |
1301 | bin->slabcur = NULL; |
1302 | } else { |
1303 | szind_t binind = edata_szind_get(slab); |
1304 | const bin_info_t *bin_info = &bin_infos[binind]; |
1305 | |
1306 | /* |
1307 | * The following block's conditional is necessary because if the |
1308 | * slab only contains one region, then it never gets inserted |
1309 | * into the non-full slabs heap. |
1310 | */ |
1311 | if (bin_info->nregs == 1) { |
1312 | arena_bin_slabs_full_remove(arena, bin, slab); |
1313 | } else { |
1314 | arena_bin_slabs_nonfull_remove(bin, slab); |
1315 | } |
1316 | } |
1317 | } |
1318 | |
1319 | static void |
1320 | arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab, |
1321 | bin_t *bin) { |
1322 | assert(edata_nfree_get(slab) > 0); |
1323 | |
1324 | /* |
1325 | * Make sure that if bin->slabcur is non-NULL, it refers to the |
1326 | * oldest/lowest non-full slab. It is okay to NULL slabcur out rather |
1327 | * than proactively keeping it pointing at the oldest/lowest non-full |
1328 | * slab. |
1329 | */ |
1330 | if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) { |
1331 | /* Switch slabcur. */ |
1332 | if (edata_nfree_get(bin->slabcur) > 0) { |
1333 | arena_bin_slabs_nonfull_insert(bin, bin->slabcur); |
1334 | } else { |
1335 | arena_bin_slabs_full_insert(arena, bin, bin->slabcur); |
1336 | } |
1337 | bin->slabcur = slab; |
1338 | if (config_stats) { |
1339 | bin->stats.reslabs++; |
1340 | } |
1341 | } else { |
1342 | arena_bin_slabs_nonfull_insert(bin, slab); |
1343 | } |
1344 | } |
1345 | |
1346 | static void |
1347 | arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) { |
1348 | malloc_mutex_assert_owner(tsdn, &bin->lock); |
1349 | |
1350 | assert(slab != bin->slabcur); |
1351 | if (config_stats) { |
1352 | bin->stats.curslabs--; |
1353 | } |
1354 | } |
1355 | |
1356 | void |
1357 | arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena, |
1358 | edata_t *slab, bin_t *bin) { |
1359 | arena_dissociate_bin_slab(arena, slab, bin); |
1360 | arena_dalloc_bin_slab_prepare(tsdn, slab, bin); |
1361 | } |
1362 | |
1363 | void |
1364 | arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena, |
1365 | edata_t *slab, bin_t *bin) { |
1366 | arena_bin_slabs_full_remove(arena, bin, slab); |
1367 | arena_bin_lower_slab(tsdn, arena, slab, bin); |
1368 | } |
1369 | |
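/*
 * Return a single small region to its bin under the bin lock; if this made
 * the slab empty, the slab itself is released to the page allocator after the
 * lock is dropped.
 */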
1370 | static void |
1371 | arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) { |
1372 | szind_t binind = edata_szind_get(edata); |
1373 | unsigned binshard = edata_binshard_get(edata); |
1374 | bin_t *bin = arena_get_bin(arena, binind, binshard); |
1375 | |
1376 | malloc_mutex_lock(tsdn, &bin->lock); |
1377 | arena_dalloc_bin_locked_info_t info; |
1378 | arena_dalloc_bin_locked_begin(&info, binind); |
1379 | bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin, |
1380 | &info, binind, edata, ptr); |
1381 | arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info); |
1382 | malloc_mutex_unlock(tsdn, &bin->lock); |
1383 | |
1384 | if (ret) { |
1385 | arena_slab_dalloc(tsdn, arena, edata); |
1386 | } |
1387 | } |
1388 | |
1389 | void |
1390 | arena_dalloc_small(tsdn_t *tsdn, void *ptr) { |
1391 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); |
1392 | arena_t *arena = arena_get_from_edata(edata); |
1393 | |
1394 | arena_dalloc_bin(tsdn, arena, edata, ptr); |
1395 | arena_decay_tick(tsdn, arena); |
1396 | } |
1397 | |
1398 | bool |
1399 | arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, |
    size_t extra, bool zero, size_t *newsize) {
1401 | bool ret; |
1402 | /* Calls with non-zero extra had to clamp extra. */ |
1403 | assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); |
1404 | |
1405 | edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr); |
1406 | if (unlikely(size > SC_LARGE_MAXCLASS)) { |
1407 | ret = true; |
1408 | goto done; |
1409 | } |
1410 | |
1411 | size_t usize_min = sz_s2u(size); |
1412 | size_t usize_max = sz_s2u(size + extra); |
1413 | if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min |
1414 | <= SC_SMALL_MAXCLASS)) { |
1415 | /* |
1416 | * Avoid moving the allocation if the size class can be left the |
1417 | * same. |
1418 | */ |
1419 | assert(bin_infos[sz_size2index(oldsize)].reg_size == |
1420 | oldsize); |
1421 | if ((usize_max > SC_SMALL_MAXCLASS |
1422 | || sz_size2index(usize_max) != sz_size2index(oldsize)) |
1423 | && (size > oldsize || usize_max < oldsize)) { |
1424 | ret = true; |
1425 | goto done; |
1426 | } |
1427 | |
1428 | arena_t *arena = arena_get_from_edata(edata); |
1429 | arena_decay_tick(tsdn, arena); |
1430 | ret = false; |
1431 | } else if (oldsize >= SC_LARGE_MINCLASS |
1432 | && usize_max >= SC_LARGE_MINCLASS) { |
1433 | ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max, |
1434 | zero); |
1435 | } else { |
1436 | ret = true; |
1437 | } |
1438 | done: |
1439 | assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr)); |
1440 | *newsize = edata_usize_get(edata); |
1441 | |
1442 | return ret; |
1443 | } |
1444 | |
1445 | static void * |
1446 | arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, |
1447 | size_t alignment, bool zero, tcache_t *tcache) { |
1448 | if (alignment == 0) { |
1449 | return arena_malloc(tsdn, arena, usize, sz_size2index(usize), |
1450 | zero, tcache, true); |
1451 | } |
1452 | usize = sz_sa2u(usize, alignment); |
1453 | if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { |
1454 | return NULL; |
1455 | } |
1456 | return ipalloct(tsdn, usize, alignment, zero, tcache, arena); |
1457 | } |
1458 | |
1459 | void * |
1460 | arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, |
1461 | size_t size, size_t alignment, bool zero, tcache_t *tcache, |
1462 | hook_ralloc_args_t *hook_args) { |
1463 | size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment); |
1464 | if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { |
1465 | return NULL; |
1466 | } |
1467 | |
1468 | if (likely(usize <= SC_SMALL_MAXCLASS)) { |
1469 | /* Try to avoid moving the allocation. */ |
1470 | UNUSED size_t newsize; |
1471 | if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, |
1472 | &newsize)) { |
1473 | hook_invoke_expand(hook_args->is_realloc |
1474 | ? hook_expand_realloc : hook_expand_rallocx, |
1475 | ptr, oldsize, usize, (uintptr_t)ptr, |
1476 | hook_args->args); |
1477 | return ptr; |
1478 | } |
1479 | } |
1480 | |
1481 | if (oldsize >= SC_LARGE_MINCLASS |
1482 | && usize >= SC_LARGE_MINCLASS) { |
1483 | return large_ralloc(tsdn, arena, ptr, usize, |
1484 | alignment, zero, tcache, hook_args); |
1485 | } |
1486 | |
1487 | /* |
1488 | * size and oldsize are different enough that we need to move the |
1489 | * object. In that case, fall back to allocating new space and copying. |
1490 | */ |
1491 | void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, |
1492 | zero, tcache); |
1493 | if (ret == NULL) { |
1494 | return NULL; |
1495 | } |
1496 | |
1497 | hook_invoke_alloc(hook_args->is_realloc |
1498 | ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, |
1499 | hook_args->args); |
1500 | hook_invoke_dalloc(hook_args->is_realloc |
1501 | ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); |
1502 | |
1503 | /* |
1504 | * Junk/zero-filling were already done by |
1505 | * ipalloc()/arena_malloc(). |
1506 | */ |
1507 | size_t copysize = (usize < oldsize) ? usize : oldsize; |
1508 | memcpy(ret, ptr, copysize); |
1509 | isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); |
1510 | return ret; |
1511 | } |
1512 | |
1513 | ehooks_t * |
1514 | arena_get_ehooks(arena_t *arena) { |
1515 | return base_ehooks_get(arena->base); |
1516 | } |
1517 | |
1518 | extent_hooks_t * |
1519 | arena_set_extent_hooks(tsd_t *tsd, arena_t *arena, |
1520 | extent_hooks_t *extent_hooks) { |
1521 | background_thread_info_t *info; |
1522 | if (have_background_thread) { |
1523 | info = arena_background_thread_info_get(arena); |
1524 | malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); |
1525 | } |
	/* Stop using the HPA now that we have custom hooks. */
1527 | pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard); |
1528 | extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); |
1529 | if (have_background_thread) { |
1530 | malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); |
1531 | } |
1532 | |
1533 | return ret; |
1534 | } |
1535 | |
1536 | dss_prec_t |
1537 | arena_dss_prec_get(arena_t *arena) { |
1538 | return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); |
1539 | } |
1540 | |
1541 | bool |
1542 | arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { |
1543 | if (!have_dss) { |
1544 | return (dss_prec != dss_prec_disabled); |
1545 | } |
1546 | atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); |
1547 | return false; |
1548 | } |
1549 | |
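/*
 * Default dirty/muzzy decay times (in milliseconds).  These seed the decay
 * settings of newly created arenas (see the pa_shard_init() call in
 * arena_new()), and are typically adjusted at runtime through e.g. the
 * "arenas.dirty_decay_ms" / "arenas.muzzy_decay_ms" mallctls.
 */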
1550 | ssize_t |
1551 | arena_dirty_decay_ms_default_get(void) { |
1552 | return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); |
1553 | } |
1554 | |
1555 | bool |
1556 | arena_dirty_decay_ms_default_set(ssize_t decay_ms) { |
1557 | if (!decay_ms_valid(decay_ms)) { |
1558 | return true; |
1559 | } |
1560 | atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); |
1561 | return false; |
1562 | } |
1563 | |
1564 | ssize_t |
1565 | arena_muzzy_decay_ms_default_get(void) { |
1566 | return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); |
1567 | } |
1568 | |
1569 | bool |
1570 | arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { |
1571 | if (!decay_ms_valid(decay_ms)) { |
1572 | return true; |
1573 | } |
1574 | atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); |
1575 | return false; |
1576 | } |
1577 | |
1578 | bool |
1579 | arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, |
1580 | size_t *new_limit) { |
1581 | assert(opt_retain); |
1582 | return pac_retain_grow_limit_get_set(tsd_tsdn(tsd), |
1583 | &arena->pa_shard.pac, old_limit, new_limit); |
1584 | } |
1585 | |
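/*
 * Per-arena thread counts: nthreads[0] counts application threads bound to
 * the arena, nthreads[1] counts threads using it for internal allocations,
 * matching the bool internal argument below.
 */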
1586 | unsigned |
1587 | arena_nthreads_get(arena_t *arena, bool internal) { |
1588 | return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); |
1589 | } |
1590 | |
1591 | void |
1592 | arena_nthreads_inc(arena_t *arena, bool internal) { |
1593 | atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); |
1594 | } |
1595 | |
1596 | void |
1597 | arena_nthreads_dec(arena_t *arena, bool internal) { |
1598 | atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); |
1599 | } |
1600 | |
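/*
 * Create and initialize arena `ind`.  Arena 0 reuses the bootstrap base
 * (b0get()); every other arena gets its own base allocator, created with the
 * configured extent hooks.  The arena struct is over-allocated to hold
 * nbins_total sharded bins directly behind it, which is what the offsets
 * computed in arena_boot() index into.
 */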
1601 | arena_t * |
1602 | arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) { |
1603 | arena_t *arena; |
1604 | base_t *base; |
1605 | unsigned i; |
1606 | |
1607 | if (ind == 0) { |
1608 | base = b0get(); |
1609 | } else { |
1610 | base = base_new(tsdn, ind, config->extent_hooks, |
1611 | config->metadata_use_hooks); |
1612 | if (base == NULL) { |
1613 | return NULL; |
1614 | } |
1615 | } |
1616 | |
1617 | size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; |
1618 | arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); |
1619 | if (arena == NULL) { |
1620 | goto label_error; |
1621 | } |
1622 | |
1623 | atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); |
1624 | atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); |
1625 | arena->last_thd = NULL; |
1626 | |
1627 | if (config_stats) { |
1628 | if (arena_stats_init(tsdn, &arena->stats)) { |
1629 | goto label_error; |
1630 | } |
1631 | |
1632 | ql_new(&arena->tcache_ql); |
1633 | ql_new(&arena->cache_bin_array_descriptor_ql); |
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1635 | WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { |
1636 | goto label_error; |
1637 | } |
1638 | } |
1639 | |
1640 | atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), |
1641 | ATOMIC_RELAXED); |
1642 | |
1643 | edata_list_active_init(&arena->large); |
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1645 | WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { |
1646 | goto label_error; |
1647 | } |
1648 | |
1649 | nstime_t cur_time; |
1650 | nstime_init_update(&cur_time); |
1651 | if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global, |
1652 | &arena_emap_global, base, ind, &arena->stats.pa_shard_stats, |
1653 | LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold, |
1654 | arena_dirty_decay_ms_default_get(), |
1655 | arena_muzzy_decay_ms_default_get())) { |
1656 | goto label_error; |
1657 | } |
1658 | |
1659 | /* Initialize bins. */ |
1660 | atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); |
1661 | for (i = 0; i < nbins_total; i++) { |
1662 | bool err = bin_init(&arena->bins[i]); |
1663 | if (err) { |
1664 | goto label_error; |
1665 | } |
1666 | } |
1667 | |
1668 | arena->base = base; |
1669 | /* Set arena before creating background threads. */ |
1670 | arena_set(ind, arena); |
1671 | arena->ind = ind; |
1672 | |
1673 | nstime_init_update(&arena->create_time); |
1674 | |
1675 | /* |
	 * We turn on the HPA if it is enabled in the options.  There are two
	 * exceptions:
1677 | * - Custom extent hooks (we should only return memory allocated from |
1678 | * them in that case). |
1679 | * - Arena 0 initialization. In this case, we're mid-bootstrapping, and |
1680 | * so arena_hpa_global is not yet initialized. |
1681 | */ |
1682 | if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) { |
1683 | hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts; |
1684 | hpa_shard_opts.deferral_allowed = background_thread_enabled(); |
1685 | if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, |
1686 | &hpa_shard_opts, &opt_hpa_sec_opts)) { |
1687 | goto label_error; |
1688 | } |
1689 | } |
1690 | |
1691 | /* We don't support reentrancy for arena 0 bootstrapping. */ |
1692 | if (ind != 0) { |
1693 | /* |
1694 | * If we're here, then arena 0 already exists, so bootstrapping |
1695 | * is done enough that we should have tsd. |
1696 | */ |
1697 | assert(!tsdn_null(tsdn)); |
1698 | pre_reentrancy(tsdn_tsd(tsdn), arena); |
1699 | if (test_hooks_arena_new_hook) { |
1700 | test_hooks_arena_new_hook(); |
1701 | } |
1702 | post_reentrancy(tsdn_tsd(tsdn)); |
1703 | } |
1704 | |
1705 | return arena; |
1706 | label_error: |
1707 | if (ind != 0) { |
1708 | base_delete(tsdn, base); |
1709 | } |
1710 | return NULL; |
1711 | } |
1712 | |
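/*
 * Return the dedicated arena used for oversized allocations (those crossing
 * oversize_threshold), creating it on first use.  The huge arena is set up to
 * purge eagerly (decay time 0), for the reasons given below.
 */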
1713 | arena_t * |
1714 | arena_choose_huge(tsd_t *tsd) { |
1715 | /* huge_arena_ind can be 0 during init (will use a0). */ |
1716 | if (huge_arena_ind == 0) { |
1717 | assert(!malloc_initialized()); |
1718 | } |
1719 | |
1720 | arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false); |
1721 | if (huge_arena == NULL) { |
1722 | /* Create the huge arena on demand. */ |
1723 | assert(huge_arena_ind != 0); |
1724 | huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true); |
1725 | if (huge_arena == NULL) { |
1726 | return NULL; |
1727 | } |
1728 | /* |
1729 | * Purge eagerly for huge allocations, because: 1) number of |
		 * huge allocations is usually small, which means ticker-based
1731 | * decay is not reliable; and 2) less immediate reuse is |
1732 | * expected for huge allocations. |
1733 | */ |
1734 | if (arena_dirty_decay_ms_default_get() > 0) { |
1735 | arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, |
1736 | extent_state_dirty, 0); |
1737 | } |
1738 | if (arena_muzzy_decay_ms_default_get() > 0) { |
1739 | arena_decay_ms_set(tsd_tsdn(tsd), huge_arena, |
1740 | extent_state_muzzy, 0); |
1741 | } |
1742 | } |
1743 | |
1744 | return huge_arena; |
1745 | } |
1746 | |
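/*
 * Decide at boot whether the dedicated huge arena is enabled.  An
 * out-of-range opt_oversize_threshold disables the feature by pushing
 * oversize_threshold past SC_LARGE_MAXCLASS, so that no request can ever
 * cross it; otherwise the next arena index is reserved for the huge arena.
 */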
1747 | bool |
1748 | arena_init_huge(void) { |
1749 | bool huge_enabled; |
1750 | |
	/* The threshold should be a large size class. */
1752 | if (opt_oversize_threshold > SC_LARGE_MAXCLASS || |
1753 | opt_oversize_threshold < SC_LARGE_MINCLASS) { |
1754 | opt_oversize_threshold = 0; |
1755 | oversize_threshold = SC_LARGE_MAXCLASS + PAGE; |
1756 | huge_enabled = false; |
1757 | } else { |
1758 | /* Reserve the index for the huge arena. */ |
1759 | huge_arena_ind = narenas_total_get(); |
1760 | oversize_threshold = opt_oversize_threshold; |
1761 | huge_enabled = true; |
1762 | } |
1763 | |
1764 | return huge_enabled; |
1765 | } |
1766 | |
1767 | bool |
1768 | arena_is_huge(unsigned arena_ind) { |
1769 | if (huge_arena_ind == 0) { |
1770 | return false; |
1771 | } |
1772 | return (arena_ind == huge_arena_ind); |
1773 | } |
1774 | |
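/*
 * One-time boot work: record the decay defaults from the options, precompute
 * the division magic used to turn an offset within a slab into a region index
 * for each small size class, and lay out the cumulative offsets of the
 * sharded bins that arena_new() appends to each arena_t.
 */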
1775 | bool |
1776 | arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) { |
1777 | arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); |
1778 | arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); |
1779 | for (unsigned i = 0; i < SC_NBINS; i++) { |
1780 | sc_t *sc = &sc_data->sc[i]; |
1781 | div_init(&arena_binind_div_info[i], |
1782 | (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); |
1783 | } |
1784 | |
1785 | uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins); |
1786 | for (szind_t i = 0; i < SC_NBINS; i++) { |
1787 | arena_bin_offsets[i] = cur_offset; |
1788 | nbins_total += bin_infos[i].n_shards; |
1789 | cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t)); |
1790 | } |
1791 | return pa_central_init(&arena_pa_central_global, base, hpa, |
1792 | &hpa_hooks_default); |
1793 | } |
1794 | |
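/*
 * Fork handlers.  The arena_prefork*() functions are invoked in ascending
 * order by the fork-prepare handler so that the arena's mutexes are acquired
 * in a consistent order across all arenas; the postfork functions release
 * them again in the parent and reinitialize them in the child.
 */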
1795 | void |
1796 | arena_prefork0(tsdn_t *tsdn, arena_t *arena) { |
1797 | pa_shard_prefork0(tsdn, &arena->pa_shard); |
1798 | } |
1799 | |
1800 | void |
1801 | arena_prefork1(tsdn_t *tsdn, arena_t *arena) { |
1802 | if (config_stats) { |
1803 | malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); |
1804 | } |
1805 | } |
1806 | |
1807 | void |
1808 | arena_prefork2(tsdn_t *tsdn, arena_t *arena) { |
1809 | pa_shard_prefork2(tsdn, &arena->pa_shard); |
1810 | } |
1811 | |
1812 | void |
1813 | arena_prefork3(tsdn_t *tsdn, arena_t *arena) { |
1814 | pa_shard_prefork3(tsdn, &arena->pa_shard); |
1815 | } |
1816 | |
1817 | void |
1818 | arena_prefork4(tsdn_t *tsdn, arena_t *arena) { |
1819 | pa_shard_prefork4(tsdn, &arena->pa_shard); |
1820 | } |
1821 | |
1822 | void |
1823 | arena_prefork5(tsdn_t *tsdn, arena_t *arena) { |
1824 | pa_shard_prefork5(tsdn, &arena->pa_shard); |
1825 | } |
1826 | |
1827 | void |
1828 | arena_prefork6(tsdn_t *tsdn, arena_t *arena) { |
1829 | base_prefork(tsdn, arena->base); |
1830 | } |
1831 | |
1832 | void |
1833 | arena_prefork7(tsdn_t *tsdn, arena_t *arena) { |
1834 | malloc_mutex_prefork(tsdn, &arena->large_mtx); |
1835 | } |
1836 | |
1837 | void |
1838 | arena_prefork8(tsdn_t *tsdn, arena_t *arena) { |
1839 | for (unsigned i = 0; i < nbins_total; i++) { |
1840 | bin_prefork(tsdn, &arena->bins[i]); |
1841 | } |
1842 | } |
1843 | |
1844 | void |
1845 | arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { |
1846 | for (unsigned i = 0; i < nbins_total; i++) { |
1847 | bin_postfork_parent(tsdn, &arena->bins[i]); |
1848 | } |
1849 | |
1850 | malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); |
1851 | base_postfork_parent(tsdn, arena->base); |
1852 | pa_shard_postfork_parent(tsdn, &arena->pa_shard); |
1853 | if (config_stats) { |
1854 | malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); |
1855 | } |
1856 | } |
1857 | |
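/*
 * Only the forking thread survives in the child, so besides reinitializing
 * the mutexes we reset the per-arena thread counts and rebuild the tcache
 * list with just the surviving thread's tcache (if it is bound to this
 * arena).
 */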
1858 | void |
1859 | arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { |
1860 | atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); |
1861 | atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); |
1862 | if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { |
1863 | arena_nthreads_inc(arena, false); |
1864 | } |
1865 | if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { |
1866 | arena_nthreads_inc(arena, true); |
1867 | } |
1868 | if (config_stats) { |
1869 | ql_new(&arena->tcache_ql); |
1870 | ql_new(&arena->cache_bin_array_descriptor_ql); |
1871 | tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn)); |
1872 | if (tcache_slow != NULL && tcache_slow->arena == arena) { |
1873 | tcache_t *tcache = tcache_slow->tcache; |
1874 | ql_elm_new(tcache_slow, link); |
1875 | ql_tail_insert(&arena->tcache_ql, tcache_slow, link); |
1876 | cache_bin_array_descriptor_init( |
1877 | &tcache_slow->cache_bin_array_descriptor, |
1878 | tcache->bins); |
1879 | ql_tail_insert(&arena->cache_bin_array_descriptor_ql, |
1880 | &tcache_slow->cache_bin_array_descriptor, link); |
1881 | } |
1882 | } |
1883 | |
1884 | for (unsigned i = 0; i < nbins_total; i++) { |
1885 | bin_postfork_child(tsdn, &arena->bins[i]); |
1886 | } |
1887 | |
1888 | malloc_mutex_postfork_child(tsdn, &arena->large_mtx); |
1889 | base_postfork_child(tsdn, arena->base); |
1890 | pa_shard_postfork_child(tsdn, &arena->pa_shard); |
1891 | if (config_stats) { |
1892 | malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); |
1893 | } |
1894 | } |
1895 | |