#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

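/*
 * Spin count passed to InitializeCriticalSectionAndSpinCount() in
 * malloc_mutex_init() on Windows targets without SRW locks; define a fallback
 * in case the CRT headers do not already provide it.
 */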
#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif

/*
 * Based on benchmark results, a fixed spin with this number of retries works
 * well for our critical sections.  A value of -1 removes the cap, i.e. spin
 * until the lock is acquired.
 */
int64_t opt_mutex_max_spin = 600;

/******************************************************************************/
/* Data. */

#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
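/*
 * On platforms that provide _pthread_mutex_init_calloc_cb() (e.g. FreeBSD
 * libc), mutexes created before malloc_mutex_boot() runs are queued on
 * postponed_mutexes and initialized later, once bootstrap_calloc is safe to
 * call.
 */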
#ifdef JEMALLOC_MUTEX_INIT_CB
static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

/******************************************************************************/
/*
 * We intercept pthread_create() calls in order to toggle isthreaded if the
 * process goes multi-threaded.
 */

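/*
 * pthread_create_wrapper() is defined elsewhere in the library; the
 * assumption here is that it records the transition to multi-threaded
 * operation (isthreaded) before forwarding to the real pthread_create().
 */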
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
    void *__restrict arg) {
	return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif

/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

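/*
 * Slow path taken when the inline fast path of malloc_mutex_lock() (in the
 * mutex header) fails to acquire the lock.  A hedged sketch of the expected
 * call site, for illustration only:
 *
 *	if (malloc_mutex_trylock_final(mutex)) {
 *		malloc_mutex_lock_slow(mutex);
 *	}
 *
 * On multi-CPU systems we first spin, hoping the owner leaves its critical
 * section quickly; if spinning does not succeed (or on a single CPU), we fall
 * back to a blocking acquisition and record contention statistics.
 */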
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
	mutex_prof_data_t *data = &mutex->prof_data;
	nstime_t before;

	if (ncpus == 1) {
		goto label_spin_done;
	}

	int cnt = 0;
	do {
		spin_cpu_spinwait();
		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
		    && !malloc_mutex_trylock_final(mutex)) {
			data->n_spin_acquired++;
			return;
		}
	} while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);

	if (!config_stats) {
		/* Spinning is the only optimization worth doing when stats are off. */
		malloc_mutex_lock_final(mutex);
		return;
	}
label_spin_done:
	nstime_init_update(&before);
	/* Copy before into after so delta cannot go negative if the clock skews. */
	nstime_t after;
	nstime_copy(&after, &before);
	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
	    ATOMIC_RELAXED) + 1;
	/* One last try; the two calls above may take quite a few cycles. */
	if (!malloc_mutex_trylock_final(mutex)) {
		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
		data->n_spin_acquired++;
		return;
	}

	/* True slow path. */
	malloc_mutex_lock_final(mutex);
	/* Update more slow-path-only counters. */
	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
	nstime_update(&after);

	nstime_t delta;
	nstime_copy(&delta, &after);
	nstime_subtract(&delta, &before);

	data->n_wait_times++;
	nstime_add(&data->tot_wait_time, &delta);
	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
		nstime_copy(&data->max_wait_time, &delta);
	}
	if (n_thds > data->max_n_thds) {
		data->max_n_thds = n_thds;
	}
}

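/* Reset all of a mutex's contention counters and wait timers to zero. */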
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
	memset(data, 0, sizeof(mutex_prof_data_t));
	nstime_init_zero(&data->max_wait_time);
	nstime_init_zero(&data->tot_wait_time);
	data->prev_owner = NULL;
}

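/*
 * Reset profiling data; the caller must own the mutex (checked in debug
 * builds only).
 */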
void
malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_assert_owner(tsdn, mutex);
	mutex_prof_data_init(&mutex->prof_data);
}

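/*
 * Witness comparison callback for address-ordered mutexes: mutexes that share
 * a witness rank are ordered by address, which yields a deterministic (and
 * therefore deadlock-free) acquisition order.  The witness arguments are
 * unused; only the opaque mutex pointers matter here.
 */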
static int
mutex_addr_comp(const witness_t *witness1, void *mutex1,
    const witness_t *witness2, void *mutex2) {
	assert(mutex1 != NULL);
	assert(mutex2 != NULL);
	uintptr_t mu1int = (uintptr_t)mutex1;
	uintptr_t mu2int = (uintptr_t)mutex2;
	if (mu1int < mu2int) {
		return -1;
	} else if (mu1int == mu2int) {
		return 0;
	} else {
		return 1;
	}
}

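/*
 * Initialize a mutex, returning true on error and false on success.  A hedged
 * usage sketch (the rank and lock-order arguments below are placeholders, not
 * prescriptions; callers pick whatever fits their witness scheme):
 *
 *	static malloc_mutex_t example_mtx;
 *	if (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
 *	    malloc_mutex_rank_exclusive)) {
 *		(handle initialization failure)
 *	}
 *
 * The branches below select SRW locks or critical sections on Windows,
 * os_unfair_lock on Apple platforms, the calloc-callback initializer where
 * available, and plain pthread mutexes otherwise.
 */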
bool
malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
	mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
	InitializeSRWLock(&mutex->lock);
#  else
	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
	    _CRT_SPINCOUNT)) {
		return true;
	}
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
	mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
	if (postpone_init) {
		mutex->postponed_next = postponed_mutexes;
		postponed_mutexes = mutex;
	} else {
		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
	}
#else
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0) {
		return true;
	}
	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return true;
	}
	pthread_mutexattr_destroy(&attr);
#endif
	if (config_debug) {
		mutex->lock_order = lock_order;
		if (lock_order == malloc_mutex_address_ordered) {
			witness_init(&mutex->witness, name, rank,
			    mutex_addr_comp, mutex);
		} else {
			witness_init(&mutex->witness, name, rank, NULL, NULL);
		}
	}
	return false;
}

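/*
 * Fork protection: the parent acquires every mutex before fork() and releases
 * it afterwards.  In the child, the underlying lock state may be unusable, so
 * unless the platform's calloc-callback initializer is in use (in which case
 * a plain unlock suffices), the mutex is rebuilt from scratch.
 */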
void
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_lock(tsdn, mutex);
}

void
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
	malloc_mutex_unlock(tsdn, mutex);
}

void
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	malloc_mutex_unlock(tsdn, mutex);
#else
	if (malloc_mutex_init(mutex, mutex->witness.name,
	    mutex->witness.rank, mutex->lock_order)) {
		malloc_printf("<jemalloc>: Error re-initializing mutex in "
		    "child\n");
		if (opt_abort) {
			abort();
		}
	}
#endif
}

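/*
 * Finish mutex bootstrapping on JEMALLOC_MUTEX_INIT_CB platforms: initialize
 * every mutex queued while postpone_init was set, now that bootstrap_calloc
 * is available.  Returns true on error; a no-op elsewhere.
 */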
bool
malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
	postpone_init = false;
	while (postponed_mutexes != NULL) {
		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
		    bootstrap_calloc) != 0) {
			return true;
		}
		postponed_mutexes = postponed_mutexes->postponed_next;
	}
#endif
	return false;
}