1 | #include "jemalloc/internal/jemalloc_preamble.h" |
2 | #include "jemalloc/internal/jemalloc_internal_includes.h" |
3 | |
4 | #include "jemalloc/internal/assert.h" |
5 | #include "jemalloc/internal/ehooks.h" |
6 | #include "jemalloc/internal/san.h" |
7 | #include "jemalloc/internal/tsd.h" |
8 | |
/*
 * The sanitizer options.  These set how often guard pages are applied: one in
 * every N large (resp. small) extents is guarded.  They seed the per-thread
 * countdown counters in tsd_san_init().
 */
size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;

/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;

/*
 * Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1
 * to always fail the nonfast_align check.
 */
uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
21 | |
22 | static inline void |
23 | san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, |
24 | uintptr_t *addr, size_t size, bool left, bool right) { |
25 | assert(!edata_guarded_get(edata)); |
26 | assert(size % PAGE == 0); |
27 | *addr = (uintptr_t)edata_base_get(edata); |
28 | if (left) { |
29 | *guard1 = *addr; |
30 | *addr += SAN_PAGE_GUARD; |
31 | } else { |
32 | *guard1 = 0; |
33 | } |
34 | |
35 | if (right) { |
36 | *guard2 = *addr + size; |
37 | } else { |
38 | *guard2 = 0; |
39 | } |
40 | } |
41 | |
42 | static inline void |
43 | san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, |
44 | uintptr_t *addr, size_t size, bool left, bool right) { |
45 | assert(edata_guarded_get(edata)); |
46 | assert(size % PAGE == 0); |
47 | *addr = (uintptr_t)edata_base_get(edata); |
48 | if (right) { |
49 | *guard2 = *addr + size; |
50 | } else { |
51 | *guard2 = 0; |
52 | } |
53 | |
54 | if (left) { |
55 | *guard1 = *addr - SAN_PAGE_GUARD; |
56 | assert(*guard1 != 0); |
57 | *addr = *guard1; |
58 | } else { |
59 | *guard1 = 0; |
60 | } |
61 | } |
62 | |
63 | void |
64 | san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, |
65 | bool left, bool right, bool remap) { |
66 | assert(left || right); |
67 | if (remap) { |
68 | emap_deregister_boundary(tsdn, emap, edata); |
69 | } |
70 | |
71 | size_t size_with_guards = edata_size_get(edata); |
72 | size_t usize = (left && right) |
73 | ? san_two_side_unguarded_sz(size_with_guards) |
74 | : san_one_side_unguarded_sz(size_with_guards); |
75 | |
76 | uintptr_t guard1, guard2, addr; |
77 | san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left, |
78 | right); |
79 | |
80 | assert(edata_state_get(edata) == extent_state_active); |
81 | ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2); |
82 | |
83 | /* Update the guarded addr and usable size of the edata. */ |
84 | edata_size_set(edata, usize); |
85 | edata_addr_set(edata, (void *)addr); |
86 | edata_guarded_set(edata, true); |
87 | |
88 | if (remap) { |
89 | emap_register_boundary(tsdn, emap, edata, SC_NSIZES, |
90 | /* slab */ false); |
91 | } |
92 | } |
93 | |
94 | static void |
95 | san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
96 | emap_t *emap, bool left, bool right, bool remap) { |
97 | assert(left || right); |
98 | /* Remove the inner boundary which no longer exists. */ |
99 | if (remap) { |
100 | assert(edata_state_get(edata) == extent_state_active); |
101 | emap_deregister_boundary(tsdn, emap, edata); |
102 | } else { |
103 | assert(edata_state_get(edata) == extent_state_retained); |
104 | } |
105 | |
106 | size_t size = edata_size_get(edata); |
107 | size_t size_with_guards = (left && right) |
108 | ? san_two_side_guarded_sz(size) |
109 | : san_one_side_guarded_sz(size); |
110 | |
111 | uintptr_t guard1, guard2, addr; |
112 | san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left, |
113 | right); |
114 | |
115 | ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2); |
116 | |
117 | /* Update the true addr and usable size of the edata. */ |
118 | edata_size_set(edata, size_with_guards); |
119 | edata_addr_set(edata, (void *)addr); |
120 | edata_guarded_set(edata, false); |
121 | |
122 | /* |
123 | * Then re-register the outer boundary including the guards, if |
124 | * requested. |
125 | */ |
126 | if (remap) { |
127 | emap_register_boundary(tsdn, emap, edata, SC_NSIZES, |
128 | /* slab */ false); |
129 | } |
130 | } |
131 | |
132 | void |
133 | san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
134 | emap_t *emap, bool left, bool right) { |
135 | san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right, |
136 | /* remap */ true); |
137 | } |
138 | |
139 | void |
140 | san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, |
141 | emap_t *emap) { |
142 | emap_assert_not_mapped(tsdn, emap, edata); |
143 | /* |
144 | * We don't want to touch the emap of about to be destroyed extents, as |
145 | * they have been unmapped upon eviction from the retained ecache. Also, |
146 | * we unguard the extents to the right, because retained extents only |
147 | * own their right guard page per san_bump_alloc's logic. |
148 | */ |
149 | san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false, |
150 | /* right */ true, /* remap */ false); |
151 | } |
152 | |
153 | static bool |
154 | san_stashed_corrupted(void *ptr, size_t size) { |
155 | if (san_junk_ptr_should_slow()) { |
156 | for (size_t i = 0; i < size; i++) { |
157 | if (((char *)ptr)[i] != (char)uaf_detect_junk) { |
158 | return true; |
159 | } |
160 | } |
161 | return false; |
162 | } |
163 | |
164 | void *first, *mid, *last; |
165 | san_junk_ptr_locations(ptr, size, &first, &mid, &last); |
166 | if (*(uintptr_t *)first != uaf_detect_junk || |
167 | *(uintptr_t *)mid != uaf_detect_junk || |
168 | *(uintptr_t *)last != uaf_detect_junk) { |
169 | return true; |
170 | } |
171 | |
172 | return false; |
173 | } |
174 | |
175 | void |
176 | san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) { |
177 | /* |
178 | * Verify that the junked-filled & stashed pointers remain unchanged, to |
179 | * detect write-after-free. |
180 | */ |
181 | for (size_t n = 0; n < nstashed; n++) { |
182 | void *stashed = ptrs[n]; |
183 | assert(stashed != NULL); |
184 | assert(cache_bin_nonfast_aligned(stashed)); |
185 | if (unlikely(san_stashed_corrupted(stashed, usize))) { |
186 | safety_check_fail("<jemalloc>: Write-after-free " |
187 | "detected on deallocated pointer %p (size %zu).\n" , |
188 | stashed, usize); |
189 | } |
190 | } |
191 | } |
192 | |
193 | void |
194 | tsd_san_init(tsd_t *tsd) { |
195 | *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small; |
196 | *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large; |
197 | } |
198 | |
199 | void |
200 | san_init(ssize_t lg_san_uaf_align) { |
201 | assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE); |
202 | if (lg_san_uaf_align == -1) { |
203 | san_cache_bin_nonfast_mask = (uintptr_t)-1; |
204 | return; |
205 | } |
206 | |
207 | san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1; |
208 | } |
209 | |