1 | #include "jemalloc/internal/jemalloc_preamble.h" |
2 | #include "jemalloc/internal/jemalloc_internal_includes.h" |
3 | |
4 | #include "jemalloc/internal/san_bump.h" |
5 | #include "jemalloc/internal/pac.h" |
6 | #include "jemalloc/internal/san.h" |
7 | #include "jemalloc/internal/ehooks.h" |
8 | #include "jemalloc/internal/edata_cache.h" |
9 | |
/*
 * Replace sba->curr_reg with a freshly allocated region of at least `size`
 * bytes; caller must hold sba->mtx.  Returns true on error.
 */
static bool
san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
    ehooks_t *ehooks, size_t size);
13 | |
/*
 * Allocate a guarded extent of `size` usable bytes by bumping off the front
 * of the allocator's current retained region.  A guard page is placed on the
 * right side only (san_one_side_guarded_sz accounts for it).  Returns the
 * new extent, or NULL on failure.
 *
 * Locking: sba->mtx is held while the current region is replaced/split; the
 * old region (if replaced) is destroyed only after the mutex is dropped.
 */
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
    ehooks_t *ehooks, size_t size, bool zero) {
	assert(san_bump_enabled());

	edata_t* to_destroy;
	/* Request size plus the single (right-side) guard page. */
	size_t guarded_size = san_one_side_guarded_sz(size);

	malloc_mutex_lock(tsdn, &sba->mtx);

	if (sba->curr_reg == NULL ||
	    edata_size_get(sba->curr_reg) < guarded_size) {
		/*
		 * If the current region can't accommodate the allocation,
		 * try replacing it with a larger one and destroy current if the
		 * replacement succeeds.
		 */
		to_destroy = sba->curr_reg;
		bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
		    guarded_size);
		if (err) {
			goto label_err;
		}
	} else {
		to_destroy = NULL;
	}
	/* At this point the current region is guaranteed large enough. */
	assert(guarded_size <= edata_size_get(sba->curr_reg));
	size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;

	edata_t* edata;
	if (trail_size != 0) {
		/*
		 * Carve the allocation off the front; the trailing remainder
		 * becomes the new current region.
		 */
		edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
		    ehooks, sba->curr_reg, guarded_size, trail_size,
		    /* holding_core_locks */ true);
		if (curr_reg_trail == NULL) {
			goto label_err;
		}
		edata = sba->curr_reg;
		sba->curr_reg = curr_reg_trail;
	} else {
		/* Exact fit: hand out the whole region. */
		edata = sba->curr_reg;
		sba->curr_reg = NULL;
	}

	malloc_mutex_unlock(tsdn, &sba->mtx);

	assert(!edata_guarded_get(edata));
	assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
	assert(to_destroy == NULL || !edata_guarded_get(to_destroy));

	/* Destroy the replaced region outside the critical section. */
	if (to_destroy != NULL) {
		extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
	}

	/* Guard the right side only; remap so the emap sees the guard. */
	san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
	    /* right */ true, /* remap */ true);

	if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
	    /* growing_retained */ false)) {
		/*
		 * Commit/zero failed: park the extent in the retained ecache
		 * rather than losing it, and report failure.
		 */
		extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
		    edata);
		return NULL;
	}

	if (config_prof) {
		extent_gdump_add(tsdn, edata);
	}

	return edata;
label_err:
	malloc_mutex_unlock(tsdn, &sba->mtx);
	return NULL;
}
87 | |
88 | static bool |
89 | san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, |
90 | ehooks_t *ehooks, size_t size) { |
91 | malloc_mutex_assert_owner(tsdn, &sba->mtx); |
92 | |
93 | bool committed = false, zeroed = false; |
94 | size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size : |
95 | SBA_RETAINED_ALLOC_SIZE; |
96 | assert((alloc_size & PAGE_MASK) == 0); |
97 | sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL, |
98 | alloc_size, PAGE, zeroed, &committed, |
99 | /* growing_retained */ true); |
100 | if (sba->curr_reg == NULL) { |
101 | return true; |
102 | } |
103 | return false; |
104 | } |
105 | |