1#include "jemalloc/internal/jemalloc_preamble.h"
2#include "jemalloc/internal/jemalloc_internal_includes.h"
3
4#include "jemalloc/internal/ehooks.h"
5#include "jemalloc/internal/extent_mmap.h"
6
/*
 * Initialize an ehooks wrapper: record the caller-supplied index (ind) and
 * install the given extent hook table via ehooks_set_extent_hooks_ptr.
 */
void
ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
	/* All other hooks are optional; this one is not. */
	assert(extent_hooks->alloc != NULL);
	ehooks->ind = ind;
	ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
}
14
15/*
16 * If the caller specifies (!*zero), it is still possible to receive zeroed
17 * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
18 * advantage of this to avoid demanding zeroed extents, but taking advantage of
19 * them if they are returned.
20 */
21static void *
22extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
23 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
24 void *ret;
25
26 assert(size != 0);
27 assert(alignment != 0);
28
29 /* "primary" dss. */
30 if (have_dss && dss_prec == dss_prec_primary && (ret =
31 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
32 commit)) != NULL) {
33 return ret;
34 }
35 /* mmap. */
36 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
37 != NULL) {
38 return ret;
39 }
40 /* "secondary" dss. */
41 if (have_dss && dss_prec == dss_prec_secondary && (ret =
42 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
43 commit)) != NULL) {
44 return ret;
45 }
46
47 /* All strategies for allocation failed. */
48 return NULL;
49}
50
51void *
52ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
53 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
54 arena_t *arena = arena_get(tsdn, arena_ind, false);
55 /* NULL arena indicates arena_create. */
56 assert(arena != NULL || alignment == HUGEPAGE);
57 dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
58 (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
59 void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
60 zero, commit, dss);
61 if (have_madvise_huge && ret) {
62 pages_set_thp_state(ret, size);
63 }
64 return ret;
65}
66
67static void *
68ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
69 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
70 return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
71 ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
72}
73
74bool
75ehooks_default_dalloc_impl(void *addr, size_t size) {
76 if (!have_dss || !extent_in_dss(addr)) {
77 return extent_dalloc_mmap(addr, size);
78 }
79 return true;
80}
81
/*
 * extent_hooks_t dalloc entry; extent_hooks, committed, and arena_ind are
 * unused by the default implementation.
 */
static bool
ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return ehooks_default_dalloc_impl(addr, size);
}
87
88void
89ehooks_default_destroy_impl(void *addr, size_t size) {
90 if (!have_dss || !extent_in_dss(addr)) {
91 pages_unmap(addr, size);
92 }
93}
94
/*
 * extent_hooks_t destroy entry; extent_hooks, committed, and arena_ind are
 * unused by the default implementation.
 */
static void
ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	ehooks_default_destroy_impl(addr, size);
}
100
/*
 * Default commit: commit length bytes starting offset bytes into addr,
 * forwarding pages_commit's result.
 */
bool
ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
	void *commit_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_commit(commit_addr, length);
}
106
/*
 * extent_hooks_t commit entry; extent_hooks, size, and arena_ind are unused
 * by the default implementation.
 */
static bool
ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_commit_impl(addr, offset, length);
}
112
/*
 * Default decommit: decommit length bytes starting offset bytes into addr,
 * forwarding pages_decommit's result.
 */
bool
ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
	void *decommit_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_decommit(decommit_addr, length);
}
118
/*
 * extent_hooks_t decommit entry; extent_hooks, size, and arena_ind are
 * unused by the default implementation.
 */
static bool
ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return ehooks_default_decommit_impl(addr, offset, length);
}
124
125#ifdef PAGES_CAN_PURGE_LAZY
/*
 * Default lazy purge: lazily purge length bytes starting offset bytes into
 * addr, forwarding pages_purge_lazy's result.
 */
bool
ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
	void *purge_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_purge_lazy(purge_addr, length);
}
131
/*
 * extent_hooks_t purge_lazy entry.  extent_hooks, size, and arena_ind are
 * unused by the default implementation.
 */
static bool
ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	/* The hook contract requires a non-empty, page-aligned range. */
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_lazy_impl(addr, offset, length);
}
141#endif
142
143#ifdef PAGES_CAN_PURGE_FORCED
/*
 * Default forced purge: forcibly purge length bytes starting offset bytes
 * into addr, forwarding pages_purge_forced's result.
 */
bool
ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
	void *purge_addr = (void *)((uintptr_t)addr + (uintptr_t)offset);
	return pages_purge_forced(purge_addr, length);
}
149
/*
 * extent_hooks_t purge_forced entry.  extent_hooks, size, and arena_ind are
 * unused by the default implementation.
 */
static bool
ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	/* The hook contract requires a non-empty, page-aligned range. */
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);
	return ehooks_default_purge_forced_impl(addr, offset, length);
}
159#endif
160
161bool
162ehooks_default_split_impl() {
163 if (!maps_coalesce) {
164 /*
165 * Without retain, only whole regions can be purged (required by
166 * MEM_RELEASE on Windows) -- therefore disallow splitting. See
167 * comments in extent_head_no_merge().
168 */
169 return !opt_retain;
170 }
171
172 return false;
173}
174
/*
 * extent_hooks_t split entry; the decision depends only on global
 * configuration, so all arguments are ignored.
 */
static bool
ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return ehooks_default_split_impl();
}
180
181bool
182ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
183 assert(addr_a < addr_b);
184 /*
185 * For non-DSS cases --
186 * a) W/o maps_coalesce, merge is not always allowed (Windows):
187 * 1) w/o retain, never merge (first branch below).
188 * 2) with retain, only merge extents from the same VirtualAlloc
189 * region (in which case MEM_DECOMMIT is utilized for purging).
190 *
191 * b) With maps_coalesce, it's always possible to merge.
192 * 1) w/o retain, always allow merge (only about dirty / muzzy).
193 * 2) with retain, to preserve the SN / first-fit, merge is still
194 * disallowed if b is a head extent, i.e. no merging across
195 * different mmap regions.
196 *
197 * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
198 * sanity checked in the second branch below.
199 */
200 if (!maps_coalesce && !opt_retain) {
201 return true;
202 }
203 if (config_debug) {
204 edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
205 addr_a);
206 bool head_a = edata_is_head_get(a);
207 edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
208 addr_b);
209 bool head_b = edata_is_head_get(b);
210 emap_assert_mapped(tsdn, &arena_emap_global, a);
211 emap_assert_mapped(tsdn, &arena_emap_global, b);
212 assert(extent_neighbor_head_state_mergeable(head_a, head_b,
213 /* forward */ true));
214 }
215 if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
216 return true;
217 }
218
219 return false;
220}
221
222bool
223ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
224 void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
225 tsdn_t *tsdn = tsdn_fetch();
226
227 return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
228}
229
230void
231ehooks_default_zero_impl(void *addr, size_t size) {
232 /*
233 * By default, we try to zero out memory using OS-provided demand-zeroed
234 * pages. If the user has specifically requested hugepages, though, we
235 * don't want to purge in the middle of a hugepage (which would break it
236 * up), so we act conservatively and use memset.
237 */
238 bool needs_memset = true;
239 if (opt_thp != thp_mode_always) {
240 needs_memset = pages_purge_forced(addr, size);
241 }
242 if (needs_memset) {
243 memset(addr, 0, size);
244 }
245}
246
/*
 * Default guard hook: delegate marking of the two guard pages to
 * pages_mark_guards.
 */
void
ehooks_default_guard_impl(void *guard1, void *guard2) {
	pages_mark_guards(guard1, guard2);
}
251
/*
 * Default unguard hook: delegate unmarking of the two guard pages to
 * pages_unmark_guards.
 */
void
ehooks_default_unguard_impl(void *guard1, void *guard2) {
	pages_unmark_guards(guard1, guard2);
}
256
/*
 * The default extent hook table.  Entries are positional, so they must stay
 * in extent_hooks_t field order; purge entries are NULL on platforms that
 * lack the corresponding purge capability (NULL hooks are optional -- see
 * ehooks_init).
 */
const extent_hooks_t ehooks_default_extent_hooks = {
	ehooks_default_alloc,
	ehooks_default_dalloc,
	ehooks_default_destroy,
	ehooks_default_commit,
	ehooks_default_decommit,
#ifdef PAGES_CAN_PURGE_LAZY
	ehooks_default_purge_lazy,
#else
	NULL,	/* lazy purge unsupported on this platform */
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	ehooks_default_purge_forced,
#else
	NULL,	/* forced purge unsupported on this platform */
#endif
	ehooks_default_split,
	ehooks_default_merge
};
276