1 | #ifndef JEMALLOC_INTERNAL_EXTENT_H |
2 | #define JEMALLOC_INTERNAL_EXTENT_H |
3 | |
4 | #include "jemalloc/internal/ecache.h" |
5 | #include "jemalloc/internal/ehooks.h" |
6 | #include "jemalloc/internal/ph.h" |
7 | #include "jemalloc/internal/rtree.h" |
8 | |
9 | /* |
10 | * This module contains the page-level allocator. It chooses the addresses that |
11 | * allocations requested by other modules will inhabit, and updates the global |
12 | * metadata to reflect allocation/deallocation/purging decisions. |
13 | */ |
14 | |
/*
 * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
 * is the maximum ratio allowed between the size of the active extent and the
 * new extent.
 */
19 | #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 |
20 | extern size_t opt_lg_extent_max_active_fit; |
21 | |
22 | edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
23 | ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, |
24 | bool zero, bool guarded); |
25 | edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
26 | ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment, |
27 | bool zero, bool guarded); |
28 | void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
29 | ecache_t *ecache, edata_t *edata); |
30 | edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
31 | ecache_t *ecache, size_t npages_min); |
32 | |
33 | void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata); |
34 | void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, |
35 | edata_t *edata); |
36 | void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
37 | edata_t *edata); |
38 | edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
39 | void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, |
40 | bool growing_retained); |
41 | void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
42 | edata_t *edata); |
43 | void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
44 | edata_t *edata); |
45 | bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
46 | size_t offset, size_t length); |
47 | bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
48 | size_t offset, size_t length); |
49 | bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
50 | size_t offset, size_t length); |
51 | bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
52 | size_t offset, size_t length); |
53 | edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, |
54 | ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, |
55 | bool holding_core_locks); |
56 | bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, |
57 | edata_t *a, edata_t *b); |
58 | bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
59 | bool commit, bool zero, bool growing_retained); |
60 | size_t extent_sn_next(pac_t *pac); |
61 | bool extent_boot(void); |
62 | |
63 | JEMALLOC_ALWAYS_INLINE bool |
64 | extent_neighbor_head_state_mergeable(bool edata_is_head, |
65 | bool neighbor_is_head, bool forward) { |
66 | /* |
67 | * Head states checking: disallow merging if the higher addr extent is a |
68 | * head extent. This helps preserve first-fit, and more importantly |
69 | * makes sure no merge across arenas. |
70 | */ |
71 | if (forward) { |
72 | if (neighbor_is_head) { |
73 | return false; |
74 | } |
75 | } else { |
76 | if (edata_is_head) { |
77 | return false; |
78 | } |
79 | } |
80 | return true; |
81 | } |
82 | |
/*
 * Decide whether `edata` may acquire (for merging/expansion) the neighboring
 * extent recorded in the rtree leaf `contents`.
 *
 * pai:            page allocation interface `edata` belongs to; must match the
 *                 neighbor's.
 * expected_state: state the neighbor must be in for a PAC merge.
 * forward:        true when the neighbor lies at a higher address than
 *                 `edata` (affects the head-state mergeability check).
 * expanding:      presumably true for in-place expansion, where the commit
 *                 states need not match — TODO confirm against callers.
 *
 * Returns true iff every merge precondition holds.  NOTE: the neighbor
 * pointer must not be dereferenced until its state has been validated (see
 * inline comments below); keep the check order intact.
 */
JEMALLOC_ALWAYS_INLINE bool
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
    extent_pai_t pai, extent_state_t expected_state, bool forward,
    bool expanding) {
	edata_t *neighbor = contents.edata;
	if (neighbor == NULL) {
		return false;
	}
	/* It's not safe to access *neighbor yet; must verify states first. */
	bool neighbor_is_head = contents.metadata.is_head;
	if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
	    neighbor_is_head, forward)) {
		return false;
	}
	extent_state_t neighbor_state = contents.metadata.state;
	if (pai == EXTENT_PAI_PAC) {
		/* PAC: the neighbor must be in exactly the expected state. */
		if (neighbor_state != expected_state) {
			return false;
		}
		/* From this point, it's safe to access *neighbor. */
		if (!expanding && (edata_committed_get(edata) !=
		    edata_committed_get(neighbor))) {
			/*
			 * Some platforms (e.g. Windows) require an explicit
			 * commit step (and writing to uncommitted memory is not
			 * allowed).
			 */
			return false;
		}
	} else {
		/* Non-PAC: any state other than active is acceptable. */
		if (neighbor_state == extent_state_active) {
			return false;
		}
		/* From this point, it's safe to access *neighbor. */
	}

	/* Both extents must belong to the same page allocation interface. */
	assert(edata_pai_get(edata) == pai);
	if (edata_pai_get(neighbor) != pai) {
		return false;
	}
	/*
	 * With opt_retain, matching arena indices are asserted as an
	 * invariant; without it, cross-arena acquisition is rejected at
	 * runtime.
	 */
	if (opt_retain) {
		assert(edata_arena_ind_get(edata) ==
		    edata_arena_ind_get(neighbor));
	} else {
		if (edata_arena_ind_get(edata) !=
		    edata_arena_ind_get(neighbor)) {
			return false;
		}
	}
	/* Guarded extents are never expected to reach this path. */
	assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));

	return true;
}
136 | |
137 | #endif /* JEMALLOC_INTERNAL_EXTENT_H */ |
138 | |