1#include "jemalloc/internal/jemalloc_preamble.h"
2
3#include "jemalloc/internal/hook.h"
4
5#include "jemalloc/internal/atomic.h"
6#include "jemalloc/internal/mutex.h"
7#include "jemalloc/internal/seq.h"
8
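/*
 * Each hook slot pairs the user-supplied hooks_t with an in_use flag; a slot
 * whose flag is false is free for reuse by a later hook_install() call.
 */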
typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
	hooks_t hooks;
	bool in_use;
};

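/*
 * seq_define() provides the seq_hooks_t type plus the seq_store_hooks() and
 * seq_try_load_hooks() accessors used below.  Stores are serialized by
 * hooks_mu; loads never block, but may fail (returning false) if they race
 * with a store, in which case readers simply skip the slot.
 */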
seq_define(hooks_internal_t, hooks)

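/*
 * nhooks counts installed hooks so that the invocation fast path (see
 * HOOK_PROLOGUE below) can return immediately when no hooks are present;
 * hooks_mu serializes installation and removal.
 */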
static atomic_u_t nhooks = ATOMIC_INIT(0);
static seq_hooks_t hooks[HOOK_MAX];
static malloc_mutex_t hooks_mu;

bool
hook_boot() {
	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
	    malloc_mutex_rank_exclusive);
}

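/*
 * Find a free slot, publish *to_install into it, and return the slot's
 * address as the opaque handle later passed to hook_remove(); returns NULL if
 * all HOOK_MAX slots are already in use.
 */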
static void *
hook_install_locked(hooks_t *to_install) {
	hooks_internal_t hooks_internal;
	for (int i = 0; i < HOOK_MAX; i++) {
		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
		/* We hold mu; no concurrent access. */
		assert(success);
		if (!hooks_internal.in_use) {
			hooks_internal.hooks = *to_install;
			hooks_internal.in_use = true;
			seq_store_hooks(&hooks[i], &hooks_internal);
			atomic_store_u(&nhooks,
			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
			    ATOMIC_RELAXED);
			return &hooks[i];
		}
	}
	return NULL;
}

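/*
 * Installing a hook also bumps the global slow-path count so that threads
 * leave the allocation fast paths and reach the hook_invoke_*() calls.
 *
 * A sketch of expected usage (my_alloc_hook, my_dalloc_hook, my_expand_hook,
 * and my_extra are hypothetical caller-provided callbacks/state):
 *
 *	hooks_t my_hooks = {
 *		.alloc_hook = &my_alloc_hook,
 *		.dalloc_hook = &my_dalloc_hook,
 *		.expand_hook = &my_expand_hook,
 *		.extra = my_extra
 *	};
 *	void *handle = hook_install(tsdn, &my_hooks);
 *	if (handle != NULL) {
 *		...
 *		hook_remove(tsdn, handle);
 *	}
 */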
void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
	malloc_mutex_lock(tsdn, &hooks_mu);
	void *ret = hook_install_locked(to_install);
	if (ret != NULL) {
		tsd_global_slow_inc(tsdn);
	}
	malloc_mutex_unlock(tsdn, &hooks_mu);
	return ret;
}

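/* Mark the slot free again and drop the installed-hook count. */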
static void
hook_remove_locked(seq_hooks_t *to_remove) {
	hooks_internal_t hooks_internal;
	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
	/* We hold mu; no concurrent access. */
	assert(success);
	/* Should only remove hooks that were added. */
	assert(hooks_internal.in_use);
	hooks_internal.in_use = false;
	seq_store_hooks(to_remove, &hooks_internal);
	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
	    ATOMIC_RELAXED);
}

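/*
 * The debug-only check below verifies that the opaque handle really points at
 * one of our slots before we interpret it as a seq_hooks_t.
 */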
void
hook_remove(tsdn_t *tsdn, void *opaque) {
	if (config_debug) {
		char *hooks_begin = (char *)&hooks[0];
		char *hooks_end = (char *)&hooks[HOOK_MAX];
		char *hook = (char *)opaque;
		assert(hooks_begin <= hook && hook < hooks_end
		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
	}
	malloc_mutex_lock(tsdn, &hooks_mu);
	hook_remove_locked((seq_hooks_t *)opaque);
	tsd_global_slow_dec(tsdn);
	malloc_mutex_unlock(tsdn, &hooks_mu);
}

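/*
 * Iterate over every installed hook set.  A slot is skipped if the lock-free
 * seq load fails (a concurrent install/remove is in progress) or if the slot
 * is not in use.
 */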
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \
for (int for_each_hook_counter = 0; \
    for_each_hook_counter < HOOK_MAX; \
    for_each_hook_counter++) { \
	bool for_each_hook_success = seq_try_load_hooks( \
	    (hooks_internal_ptr), &hooks[for_each_hook_counter]); \
	if (!for_each_hook_success) { \
		continue; \
	} \
	if (!(hooks_internal_ptr)->in_use) { \
		continue; \
	}
#define FOR_EACH_HOOK_END \
}

static bool *
hook_reentrantp() {
	/*
	 * We prevent user reentrancy within hooks. This is basically just a
	 * thread-local bool that triggers an early-exit.
	 *
	 * We don't fold in_hook into reentrancy. There are two reasons for
	 * this:
	 * - Right now, we turn on reentrancy during things like extent hook
	 *   execution. Allocating during extent hooks is not officially
	 *   supported, but we don't want to break it for the time being.
	 *   These sorts of allocations should probably still be hooked,
	 *   though.
	 * - If a hook allocates, we may want it to be relatively fast (after
	 *   all, it executes on every allocator operation). Turning on
	 *   reentrancy is a fairly heavyweight mode (disabling tcache,
	 *   redirecting to arena 0, etc.). It's possible we may one day want
	 *   to turn on reentrant mode here, if it proves too difficult to
	 *   keep this working. But that's fairly easy for us to see; OTOH,
	 *   people not using hooks because they're too slow is easy for us to
	 *   miss.
	 *
	 * The tricky part is that this code might get invoked even if we
	 * don't have access to tsd. This function mimics getting a pointer to
	 * thread-local data, except that it might secretly return a pointer
	 * to some global data if we know that the caller will take the
	 * early-exit path. If we return a bool that indicates that we are
	 * reentrant, then the caller will go down the early exit path,
	 * leaving the global untouched.
	 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	tcache_t *tcache = tsdn_tcachep_get(tsdn);
	if (tcache != NULL) {
		return &tcache->in_hook;
	}
	return &in_hook_global;
}

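/*
 * Shared prologue/epilogue for the hook_invoke_*() entry points: exit
 * immediately on the common path where no hooks are installed, and set a
 * per-thread flag so that allocator activity performed by a hook does not
 * recursively invoke the hooks.
 */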
#define HOOK_PROLOGUE \
	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
		return; \
	} \
	bool *in_hook = hook_reentrantp(); \
	if (*in_hook) { \
		return; \
	} \
	*in_hook = true;

#define HOOK_EPILOGUE \
	*in_hook = false;

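/*
 * The three invocation paths below share the same structure: guard against
 * reentrancy, walk every in-use slot, and call the corresponding callback if
 * the caller supplied one.
 */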
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_alloc h = hook.hooks.alloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, result, result_raw,
			    args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
	HOOK_PROLOGUE
	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_dalloc h = hook.hooks.dalloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, args_raw);
		}
	FOR_EACH_HOOK_END
	HOOK_EPILOGUE
}

void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	HOOK_PROLOGUE
	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_expand h = hook.hooks.expand_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, old_usize,
			    new_usize, result_raw, args_raw);
		}
	FOR_EACH_HOOK_END
	HOOK_EPILOGUE
}