#ifndef JEMALLOC_INTERNAL_GUARD_H
#define JEMALLOC_INTERNAL_GUARD_H

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/emap.h"

#define SAN_PAGE_GUARD PAGE
#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)

#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0

#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)

static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;

/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;
/* -1 means disabled, i.e. never check for use-after-free. */
extern ssize_t opt_lg_san_uaf_align;
/* Mask kept in sync with opt_lg_san_uaf_align; see san_uaf_detection_enabled(). */
extern uintptr_t san_cache_bin_nonfast_mask;

void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right, bool remap);
void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool left, bool right);
/*
 * Unguard the extent, but don't modify emap boundaries. Must be called on an
 * extent that has been erased from emap and shouldn't be placed back.
 */
void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
    edata_t *edata, emap_t *emap);
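/*
 * Check that stashed (junk-filled, not yet reused) pointers still hold the
 * junk pattern, in order to detect writes after free.
 */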
void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);

void tsd_san_init(tsd_t *tsd);
void san_init(ssize_t lg_san_uaf_align);

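/* Convenience wrappers that guard / unguard both ends of the extent. */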
static inline void
san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap, bool remap) {
	san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
}

static inline void
san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    emap_t *emap) {
	san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
}

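/*
 * Conversions between guarded and unguarded extent sizes: a two-sided guard
 * adds/removes one guard page (SAN_PAGE_GUARD) at each end; a one-sided guard
 * does so at one end only.
 */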
static inline size_t
san_two_side_unguarded_sz(size_t size) {
	assert(size % PAGE == 0);
	assert(size >= SAN_PAGE_GUARDS_SIZE);
	return size - SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_two_side_guarded_sz(size_t size) {
	assert(size % PAGE == 0);
	return size + SAN_PAGE_GUARDS_SIZE;
}

static inline size_t
san_one_side_unguarded_sz(size_t size) {
	assert(size % PAGE == 0);
	assert(size >= SAN_PAGE_GUARD);
	return size - SAN_PAGE_GUARD;
}

static inline size_t
san_one_side_guarded_sz(size_t size) {
	assert(size % PAGE == 0);
	return size + SAN_PAGE_GUARD;
}

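/* Whether extent guarding is enabled for either large or small allocations. */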
static inline bool
san_guard_enabled(void) {
	return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
}

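/*
 * Decide whether the next large extent should be guarded, implementing the
 * "guard every N extents" sampling: a per-thread countdown is decremented on
 * each call; when it reaches 1 and the request is compatible (alignment at
 * most PAGE, and the guarded size still fits in a large size class), the
 * extent is guarded and the countdown resets to opt_san_guard_large.
 * Otherwise the countdown stays at 1 until a compatible request arrives.
 */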
static inline bool
san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
    size_t alignment) {
	if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
	assert(n >= 1);
	if (n > 1) {
		/*
		 * Subtract conditionally because the guard may not happen due
		 * to alignment or size restriction below.
		 */
		*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
	}

	if (n == 1 && (alignment <= PAGE) &&
	    (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
		*tsd_san_extents_until_guard_largep_get(tsd) =
		    opt_san_guard_large;
		return true;
	} else {
		assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
		return false;
	}
}

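/*
 * Same every-N sampling for small (slab) extents, without the alignment and
 * size restrictions: when the per-thread countdown reaches 1, guard the slab
 * and reset the countdown to opt_san_guard_small.
 */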
static inline bool
san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
	if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
	assert(n >= 1);
	if (n == 1) {
		*tsd_san_extents_until_guard_smallp_get(tsd) =
		    opt_san_guard_small;
		return true;
	} else {
		*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
		assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
		return false;
	}
}

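/*
 * Compute the three probe locations used by the fast junk fill and check: the
 * first word, a pointer-aligned word near the middle, and the last word of the
 * usize-sized region.  For example (assuming 8-byte pointers), with
 * usize == 4096: first == ptr, mid == ptr + 2048, last == ptr + 4088.
 */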
static inline void
san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
    void **last) {
	size_t ptr_sz = sizeof(void *);

	*first = ptr;

	*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
	assert(*first != *mid || usize == ptr_sz);
	assert((uintptr_t)*first <= (uintptr_t)*mid);

	/*
	 * When usize > 32K, the gap between requested_size and usize might be
	 * greater than 4K -- this means the last write may access a
	 * likely-untouched page (default settings w/ 4K pages).  However by
	 * default the tcache only goes up to the 32K size class, and is
	 * usually tuned lower rather than higher, which makes this less of a
	 * concern.
	 */
	*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
	assert(*first != *last || usize == ptr_sz);
	assert(*mid != *last || usize <= ptr_sz * 2);
	assert((uintptr_t)*mid <= (uintptr_t)*last);
}

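/*
 * Whether junking must take the slow path (a full memset) instead of the
 * three-word fast path: always in debug builds, and whenever a pointer-sized
 * write could overrun the smallest size class.
 */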
static inline bool
san_junk_ptr_should_slow(void) {
	/*
	 * The latter condition (pointer size greater than the min size class)
	 * is not expected -- fall back to the slow path for simplicity.
	 */
	return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
}

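/*
 * Fill a freed region with the junk pattern: the slow path memsets all of
 * usize with the junk byte (0x5b); the fast path writes the pointer-sized
 * pattern only at the first / mid / last locations.
 */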
static inline void
san_junk_ptr(void *ptr, size_t usize) {
	if (san_junk_ptr_should_slow()) {
		memset(ptr, (char)uaf_detect_junk, usize);
		return;
	}

	void *first, *mid, *last;
	san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
	*(uintptr_t *)first = uaf_detect_junk;
	*(uintptr_t *)mid = uaf_detect_junk;
	*(uintptr_t *)last = uaf_detect_junk;
}

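/*
 * Whether use-after-free detection is active: requires both compile-time
 * support (config_uaf_detection) and opt_lg_san_uaf_align being set.
 */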
static inline bool
san_uaf_detection_enabled(void) {
	bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
	if (config_uaf_detection && ret) {
		assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
		    opt_lg_san_uaf_align) - 1);
	}

	return ret;
}

#endif /* JEMALLOC_INTERNAL_GUARD_H */