#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
size_t sz_large_pad;
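
/*
 * Round size down to the nearest page-quantized size, i.e. the largest
 * page-aligned size class (plus sz_large_pad) that does not exceed size.
 * The input must be a nonzero multiple of the page size.
 */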
size_t
sz_psz_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow. This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}
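
/*
 * Round size up to the nearest page-quantized size, i.e. the smallest
 * page-aligned size class (plus sz_large_pad) that is at least size. The
 * input must be a nonzero page multiple no larger than the padded
 * SC_LARGE_MAXCLASS.
 */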
size_t
sz_psz_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = sz_psz_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in. This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
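
/*
 * Illustrative example (assuming the default 4 KiB page, sz_large_pad == 0,
 * and the stock size-class layout, in which ..., 28K, 32K, 40K, 48K, ... are
 * the page-aligned classes): a 36 KiB size quantizes to 32 KiB via
 * sz_psz_quantize_floor() and to 40 KiB via sz_psz_quantize_ceil(). The
 * exact boundaries depend on the configured size classes.
 */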
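
/*
 * Populate sz_pind2sz_tab: each size class flagged as page-aligned (sc->psz)
 * occupies the next psz index, and all remaining slots hold an out-of-range
 * sentinel one page beyond large_maxclass.
 */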
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
	int pind = 0;
	for (unsigned i = 0; i < SC_NSIZES; i++) {
		const sc_t *sc = &sc_data->sc[i];
		if (sc->psz) {
			sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
			    + (ZU(sc->ndelta) << sc->lg_delta);
			pind++;
		}
	}
	/* Fill every remaining slot with the sentinel. */
	for (int i = pind; i <= (int)SC_NPSIZES; i++) {
		sz_pind2sz_tab[i] = sc_data->large_maxclass + PAGE;
	}
}

JEMALLOC_ALIGNED(CACHELINE)
size_t sz_index2size_tab[SC_NSIZES];
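
/*
 * Populate sz_index2size_tab with the byte size of every size class, computed
 * as (1 << lg_base) + (ndelta << lg_delta). For instance, in the default
 * layout the 160-byte class is encoded as lg_base == 7, ndelta == 1,
 * lg_delta == 5: 128 + 32 == 160.
 */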
static void
sz_boot_index2size_tab(const sc_data_t *sc_data) {
	for (unsigned i = 0; i < SC_NSIZES; i++) {
		const sc_t *sc = &sc_data->sc[i];
		sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
		    + (ZU(sc->ndelta) << (sc->lg_delta));
	}
}

/*
 * To keep this table small, we divide sizes by the tiny min size, which gives
 * the smallest interval for which the result can change.
 */
JEMALLOC_ALIGNED(CACHELINE)
uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];
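
/*
 * Populate sz_size2index_tab: entry k maps to the smallest size class whose
 * size is at least k << SC_LG_TINY_MIN, so a size can be translated to its
 * class index with one table lookup after rounding up to tiny-min
 * granularity. For example, assuming SC_LG_TINY_MIN == 3 and the default
 * x86-64 class layout (8, 16, 32, 48, ...), a 20-byte request yields table
 * index (20 + 7) >> 3 == 3, whose entry is the index of the 32-byte class.
 */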
static void
sz_boot_size2index_tab(const sc_data_t *sc_data) {
	size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
	size_t dst_ind = 0;
	for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
	    sc_ind++) {
		const sc_t *sc = &sc_data->sc[sc_ind];
		size_t sz = (ZU(1) << sc->lg_base)
		    + (ZU(sc->ndelta) << sc->lg_delta);
		size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
		    >> SC_LG_TINY_MIN);
		for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
			sz_size2index_tab[dst_ind] = sc_ind;
		}
	}
}
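
/*
 * Initialize the size translation tables from the computed size-class data.
 * This runs once during bootstrap, before any of the sz_* conversion
 * functions may be used. When cache_oblivious is enabled, large allocations
 * carry one page of padding (sz_large_pad) so their contents can be offset
 * at page granularity.
 */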
void
sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
	sz_large_pad = cache_oblivious ? PAGE : 0;
	sz_boot_pind2sz_tab(sc_data);
	sz_boot_index2size_tab(sc_data);
	sz_boot_size2index_tab(sc_data);
}