1#include "jemalloc/internal/jemalloc_preamble.h"
2#include "jemalloc/internal/jemalloc_internal_includes.h"
3
4#include "jemalloc/internal/assert.h"
5#include "jemalloc/internal/bin.h"
6#include "jemalloc/internal/sc.h"
7#include "jemalloc/internal/witness.h"
8
/* Per-size-class bin metadata table; populated once at boot by bin_boot(). */
bin_info_t bin_infos[SC_NBINS];
10
11static void
12bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
13 bin_info_t bin_infos[SC_NBINS]) {
14 for (unsigned i = 0; i < SC_NBINS; i++) {
15 bin_info_t *bin_info = &bin_infos[i];
16 sc_t *sc = &sc_data->sc[i];
17 bin_info->reg_size = ((size_t)1U << sc->lg_base)
18 + ((size_t)sc->ndelta << sc->lg_delta);
19 bin_info->slab_size = (sc->pgs << LG_PAGE);
20 bin_info->nregs =
21 (uint32_t)(bin_info->slab_size / bin_info->reg_size);
22 bin_info->n_shards = bin_shard_sizes[i];
23 bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
24 bin_info->nregs);
25 bin_info->bitmap_info = bitmap_info;
26 }
27}
28
29bool
30bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
31 size_t end_size, size_t nshards) {
32 if (nshards > BIN_SHARDS_MAX || nshards == 0) {
33 return true;
34 }
35
36 if (start_size > SC_SMALL_MAXCLASS) {
37 return false;
38 }
39 if (end_size > SC_SMALL_MAXCLASS) {
40 end_size = SC_SMALL_MAXCLASS;
41 }
42
43 /* Compute the index since this may happen before sz init. */
44 szind_t ind1 = sz_size2index_compute(start_size);
45 szind_t ind2 = sz_size2index_compute(end_size);
46 for (unsigned i = ind1; i <= ind2; i++) {
47 bin_shard_sizes[i] = (unsigned)nshards;
48 }
49
50 return false;
51}
52
53void
54bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
55 /* Load the default number of shards. */
56 for (unsigned i = 0; i < SC_NBINS; i++) {
57 bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
58 }
59}
60
/*
 * Boot-time entry point: populate the global bin_infos table from the
 * size-class data (which must already be initialized) and the per-bin
 * shard counts.
 */
void
bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
	assert(sc_data->initialized);
	bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
}
66
67bool
68bin_init(bin_t *bin) {
69 if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
70 malloc_mutex_rank_exclusive)) {
71 return true;
72 }
73 bin->slabcur = NULL;
74 extent_heap_new(&bin->slabs_nonfull);
75 extent_list_init(&bin->slabs_full);
76 if (config_stats) {
77 memset(&bin->stats, 0, sizeof(bin_stats_t));
78 }
79 return false;
80}
81
/* Pre-fork hook: acquire the bin lock so fork() sees a consistent state. */
void
bin_prefork(tsdn_t *tsdn, bin_t *bin) {
	malloc_mutex_prefork(tsdn, &bin->lock);
}
86
/* Post-fork hook (parent): release the bin lock taken in bin_prefork(). */
void
bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
	malloc_mutex_postfork_parent(tsdn, &bin->lock);
}
91
/* Post-fork hook (child): reinitialize the bin lock in the new process. */
void
bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
	malloc_mutex_postfork_child(tsdn, &bin->lock);
}
96