#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H

/*
 * Number of leafkey/leaf pairs to cache in the L1 and L2 caches, respectively.
 * Each entry covers an entire leaf, so the cache hit rate is typically high
 * even with a small number of entries. In rare cases extent activity will
 * straddle the boundary between two leaf nodes. Furthermore, an arena may use
 * a combination of dss and mmap. Note that as memory usage grows past the
 * amount this cache can directly cover, the cache becomes less effective if
 * locality of reference is low, but the consequence is merely cache misses
 * while traversing the tree nodes.
 *
 * The L1 direct-mapped cache offers consistent, low cost on a cache hit, but
 * collisions can hurt its hit rate. This is mitigated by combining it with an
 * L2 LRU cache, which requires a linear search and re-ordering on access but
 * suffers no collisions. Note that the caches themselves also incur CPU cache
 * misses if made overly large, in addition to the cost of the linear search in
 * the LRU cache.
 */
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8
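
/*
 * Illustrative sketch only, not part of jemalloc's API: one plausible way a
 * leafkey could be mapped to a slot in a direct-mapped cache of
 * RTREE_CTX_NCACHE entries. The helper name and the lg_leaf_size parameter
 * are hypothetical; jemalloc's actual key/slot derivation lives in rtree.h.
 * uintptr_t/size_t are assumed to be provided by the surrounding jemalloc
 * includes, as they are for the struct definitions below.
 */
static inline size_t
rtree_ctx_l1_slot_sketch(uintptr_t leafkey, unsigned lg_leaf_size) {
	/* Discard the bits that are constant within a leaf, then mask. */
	return (size_t)((leafkey >> lg_leaf_size) & (RTREE_CTX_NCACHE - 1));
}
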
/*
 * Zero initializer required for tsd initialization only. Proper initialization
 * done via rtree_ctx_data_init().
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}

typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
	uintptr_t leafkey;
	rtree_leaf_elm_t *leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
	/* Direct-mapped cache. */
	rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
	/* L2 LRU cache. */
	rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};
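
/*
 * Illustrative sketch only, not jemalloc's actual lookup path (which lives in
 * rtree.h): how the two-level cache described at the top of this file could be
 * consulted. The function name is hypothetical and the L2 re-ordering policy
 * shown here is an assumption; it exists to make the "linear search and
 * re-ordering on access" behavior concrete. NULL is assumed to be available
 * from the surrounding jemalloc includes.
 */
static inline rtree_leaf_elm_t *
rtree_ctx_lookup_sketch(rtree_ctx_t *ctx, uintptr_t leafkey,
    unsigned lg_leaf_size) {
	size_t slot = rtree_ctx_l1_slot_sketch(leafkey, lg_leaf_size);

	/* L1: a single direct-mapped probe; constant cost on a hit. */
	if (ctx->cache[slot].leafkey == leafkey) {
		return ctx->cache[slot].leaf;
	}

	/* L2: linear search of the small LRU array. */
	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
		if (ctx->l2_cache[i].leafkey != leafkey) {
			continue;
		}
		rtree_ctx_cache_elm_t hit = ctx->l2_cache[i];
		/*
		 * Re-order on access: bubble the hit toward the front so that
		 * recently used leaves stay cheap to find.
		 */
		if (i > 0) {
			ctx->l2_cache[i] = ctx->l2_cache[i - 1];
			ctx->l2_cache[i - 1] = hit;
		}
		return hit.leaf;
	}

	/* Missed both levels; the caller would walk the rtree itself. */
	return NULL;
}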

void rtree_ctx_data_init(rtree_ctx_t *ctx);
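
/*
 * Illustrative sketch only: what rtree_ctx_data_init() conceptually does (its
 * real definition is in rtree.c). Every slot in both caches is stamped with a
 * key that cannot match any real leafkey, so initial lookups miss cleanly
 * rather than chasing stale pointers. The sentinel value and the function
 * name below are assumptions for illustration. RTREE_CTX_ZERO_INITIALIZER
 * above exists only so a tsd-embedded rtree_ctx_t can be statically zeroed
 * before this per-thread initialization runs.
 */
static inline void
rtree_ctx_data_init_sketch(rtree_ctx_t *ctx) {
	/* Assumed sentinel: treated as a leafkey no lookup will ever use. */
	const uintptr_t invalid_leafkey = (uintptr_t)-1;

	for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
		ctx->cache[i].leafkey = invalid_leafkey;
		ctx->cache[i].leaf = NULL;
	}
	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
		ctx->l2_cache[i].leafkey = invalid_leafkey;
		ctx->l2_cache[i].leaf = NULL;
	}
}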

#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */