#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
#define JEMALLOC_INTERNAL_SAN_BUMP_H

#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/mutex.h"

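/*
 * Size of the virtual memory mapping retained by the bump allocator, out of
 * which smaller guarded extents are served (4 MiB).
 */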
#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)

extern bool opt_retain;

typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;

typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
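	/* Serializes bump allocations; protects curr_reg. */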
	malloc_mutex_t mtx;

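	/*
	 * Current retained region that allocations are bumped out of;
	 * NULL until a region has been mapped.
	 */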
	edata_t *curr_reg;
};

static inline bool
san_bump_enabled(void) {
	/*
	 * We enable the san_bump allocator only when it's possible to break up
	 * a mapping and unmap part of it (maps_coalesce). This is needed so
	 * that the arena destruction process can destroy all retained guarded
	 * extents one by one, and so that a trailing part of a retained guarded
	 * region can be unmapped when it's too small to fit a pending
	 * allocation.
	 * opt_retain is required because this allocator retains a large virtual
	 * memory mapping and returns smaller parts of it.
	 */
	return maps_coalesce && opt_retain;
}

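/*
 * Initializes the bump allocator state; returns true on error (mutex
 * initialization failure), false on success.
 */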
static inline bool
san_bump_alloc_init(san_bump_alloc_t *sba) {
	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
	if (err) {
		return true;
	}
	sba->curr_reg = NULL;

	return false;
}

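/*
 * Bump-allocates a guarded extent of `size` bytes (zeroed if `zero` is true)
 * out of the allocator's retained region; `pac` and `ehooks` are presumably
 * used to map a fresh region when the current one cannot fit the request.
 */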
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, ehooks_t *ehooks,
    size_t size, bool zero);

#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */