1 | /* Copyright 2016 Google Inc. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. */ |
14 | |
15 | #ifndef NSYNC_INTERNAL_COMMON_H_ |
16 | #define NSYNC_INTERNAL_COMMON_H_ |
17 | |
18 | #include "nsync_cpp.h" |
19 | #include "platform.h" |
20 | #include "nsync_atomic.h" |
21 | #include "sem.h" |
22 | #include "nsync_waiter.h" |
23 | #include "dll.h" |
24 | #include "nsync_mu.h" |
25 | #include "nsync_note.h" |
26 | |
/* Annotations for race detectors.
   IGNORE_RACES_START()/IGNORE_RACES_END() bracket code whose reads and writes
   should be ignored by the thread sanitizer; they expand to nothing when no
   race detector is in use. */
#if defined(__has_feature) && !defined(__SANITIZE_THREAD__)
#if __has_feature(thread_sanitizer) /* used by clang */
#define __SANITIZE_THREAD__ 1 /* GCC uses this; fake it in clang */
#endif
#endif
#if defined(__SANITIZE_THREAD__)
NSYNC_C_START_
/* Declared here instead of including a sanitizer header.
   NOTE(review): the definitions presumably come from the race-detector
   runtime; confirm no sanitizer header is available on supported builds. */
void AnnotateIgnoreWritesBegin(const char* file, int line);
void AnnotateIgnoreWritesEnd(const char* file, int line);
void AnnotateIgnoreReadsBegin(const char* file, int line);
void AnnotateIgnoreReadsEnd(const char* file, int line);
NSYNC_C_END_
#define IGNORE_RACES_START() \
	do { \
		AnnotateIgnoreReadsBegin(__FILE__, __LINE__); \
		AnnotateIgnoreWritesBegin(__FILE__, __LINE__); \
	} while (0)
#define IGNORE_RACES_END() \
	do { \
		AnnotateIgnoreWritesEnd(__FILE__, __LINE__); \
		AnnotateIgnoreReadsEnd(__FILE__, __LINE__); \
	} while (0)
#else
#define IGNORE_RACES_START()
#define IGNORE_RACES_END()
#endif
54 | |
/* NSYNC_DEBUG non-zero enables extra checking (see the DLL_* accessors
   below, which call checking functions when it is set); off by default. */
#ifndef NSYNC_DEBUG
#define NSYNC_DEBUG 0
#endif
58 | |
59 | NSYNC_CPP_START_ |
60 | |
/* Yield the CPU. Platform specific. */
void nsync_yield_ (void);

/* Retrieve the per-thread cache of the waiter object; dest is the
   cleanup function registered for thread exit.  Platform specific. */
void *nsync_per_thread_waiter_ (void (*dest) (void *));

/* Set the per-thread cache of the waiter object to v, with thread-exit
   cleanup function dest.  Platform specific. */
void nsync_set_per_thread_waiter_ (void *v, void (*dest) (void *));

/* Used in spinloops to delay resumption of the loop.
   Usage:
       unsigned attempts = 0;
       while (try_something) {
	       attempts = nsync_spin_delay_ (attempts);
       }
   Returns the updated attempt count to feed back into the next call. */
unsigned nsync_spin_delay_ (unsigned attempts);

/* Spin until (*w & test) == 0, then atomically perform *w = ((*w | set) &
   ~clear), perform an acquire barrier, and return the previous value of *w.
   */
uint32_t nsync_spin_test_and_set_ (nsync_atomic_uint32_ *w, uint32_t test,
				   uint32_t set, uint32_t clear);

/* Abort after printing the nul-terminated string s[]. */
void nsync_panic_ (const char *s);
86 | |
87 | /* ---------- */ |
88 | |
/* Return the smaller/larger of two values.  NOTE: each argument may be
   evaluated more than once; do not pass expressions with side effects. */
#define MIN_(a_,b_) ((a_) < (b_)? (a_) : (b_))
#define MAX_(a_,b_) ((a_) > (b_)? (a_) : (b_))
91 | |
92 | /* ---------- */ |
93 | |
94 | /* Fields in nsync_mu.word. |
95 | |
96 | - At least one of the MU_WLOCK or MU_RLOCK_FIELD fields must be zero. |
97 | - MU_WLOCK indicates that a write lock is held. |
98 | - MU_RLOCK_FIELD is a count of readers with read locks. |
99 | |
100 | - MU_SPINLOCK represents a spinlock that must be held when manipulating the |
101 | waiter queue. |
102 | |
103 | - MU_DESIG_WAKER indicates that a former waiter has been woken, but has |
104 | neither acquired the lock nor gone back to sleep. Legal to fail to set it; |
105 | illegal to set it when no such waiter exists. |
106 | |
107 | - MU_WAITING indicates whether the waiter queue is non-empty. |
108 | The following bits should be zero if MU_WAITING is zero. |
   - MU_CONDITION indicates that some waiter may have an associated condition
     (from nsync_mu_wait, etc.).  Legal to set it when no such waiter exists,
     but illegal to fail to set it with such a waiter.
   - MU_WRITER_WAITING indicates that a reader that has not yet blocked
     at least once should not acquire in order not to starve waiting writers.
     It is set when a writer blocks or a reader is woken with a writer waiting.
     It is reset when a writer acquires, but set again when that writer
     releases if it wakes readers and there is a waiting writer.
   - MU_LONG_WAIT indicates that a waiter has been woken many times but
     repeatedly failed to acquire when competing for the lock. This is used
     only to prevent long-term starvation by writers. The thread that sets it
     clears it when it acquires.
121 | - MU_ALL_FALSE indicates that a complete scan of the waiter list found no |
122 | waiters with true conditions, and the lock has not been acquired by a |
123 | writer since then. This allows a reader lock to be released without |
124 | testing conditions again. It is legal to fail to set this, but illegal |
125 | to set it inappropriately. |
126 | */ |
#define MU_WLOCK ((uint32_t) (1 << 0)) /* writer lock is held. */
#define MU_SPINLOCK ((uint32_t) (1 << 1)) /* spinlock is held (protects waiters). */
#define MU_WAITING ((uint32_t) (1 << 2)) /* waiter list is non-empty. */
#define MU_DESIG_WAKER ((uint32_t) (1 << 3)) /* a former waiter awoke, and hasn't yet acquired or slept anew */
#define MU_CONDITION ((uint32_t) (1 << 4)) /* the wait list contains some conditional waiters. */
#define MU_WRITER_WAITING ((uint32_t) (1 << 5)) /* there is a writer waiting */
#define MU_LONG_WAIT ((uint32_t) (1 << 6)) /* the waiter at the head of the queue has been waiting a long time */
#define MU_ALL_FALSE ((uint32_t) (1 << 7)) /* all waiter conditions are false */
#define MU_RLOCK ((uint32_t) (1 << 8)) /* low-order bit of reader count, which uses rest of word */

/* The constants below are derived from those above. */
#define MU_RLOCK_FIELD (~(uint32_t) (MU_RLOCK - 1)) /* mask of reader count field (bits MU_RLOCK and above) */

#define MU_ANY_LOCK (MU_WLOCK | MU_RLOCK_FIELD) /* mask for any lock held */

/* Writer values for the lock_type fields (see struct lock_type_s below). */
#define MU_WZERO_TO_ACQUIRE (MU_ANY_LOCK | MU_LONG_WAIT) /* bits to be zero to acquire write lock */
#define MU_WADD_TO_ACQUIRE (MU_WLOCK) /* add to acquire a write lock */
#define MU_WHELD_IF_NON_ZERO (MU_WLOCK) /* if any of these bits are set, write lock is held */
#define MU_WSET_WHEN_WAITING (MU_WAITING | MU_WRITER_WAITING) /* a writer is waiting */
#define MU_WCLEAR_ON_ACQUIRE (MU_WRITER_WAITING) /* clear MU_WRITER_WAITING when a writer acquires */
#define MU_WCLEAR_ON_UNCONTENDED_RELEASE (MU_ALL_FALSE) /* clear if a writer releases w/o waking */

/* Reader values for the lock_type fields (see struct lock_type_s below). */
/* bits to be zero to acquire read lock */
#define MU_RZERO_TO_ACQUIRE (MU_WLOCK | MU_WRITER_WAITING | MU_LONG_WAIT)
#define MU_RADD_TO_ACQUIRE (MU_RLOCK) /* add to acquire a read lock */
#define MU_RHELD_IF_NON_ZERO (MU_RLOCK_FIELD) /* if any of these bits are set, read lock is held */
#define MU_RSET_WHEN_WAITING (MU_WAITING) /* indicate that some thread is waiting */
#define MU_RCLEAR_ON_ACQUIRE ((uint32_t) 0) /* nothing to clear when a read acquires */
#define MU_RCLEAR_ON_UNCONTENDED_RELEASE ((uint32_t) 0) /* nothing to clear when a read releases */
156 | |
157 | |
/* A lock_type holds the values needed to manipulate a mu in some mode (read or
   write).  This allows some of the code to be generic, and parameterized by
   the lock type.  The MU_W* and MU_R* constants above supply the values for
   the writer and reader instances respectively. */
typedef struct lock_type_s {
	uint32_t zero_to_acquire; /* bits that must be zero to acquire */
	uint32_t add_to_acquire; /* constant to add to acquire */
	uint32_t held_if_non_zero; /* if any of these bits are set, the lock is held */
	uint32_t set_when_waiting; /* set when thread waits */
	uint32_t clear_on_acquire; /* clear when thread acquires */
	uint32_t clear_on_uncontended_release; /* clear when thread releases without waking */
} lock_type;
169 | |
170 | |
171 | /* writer_type points to a lock_type that describes how to manipulate a mu for a writer. */ |
172 | extern lock_type *nsync_writer_type_; |
173 | |
174 | /* reader_type points to a lock_type that describes how to manipulate a mu for a reader. */ |
175 | extern lock_type *nsync_reader_type_; |
176 | |
177 | /* ---------- */ |
178 | |
/* Bits in nsync_cv.word */

#define CV_SPINLOCK ((uint32_t) (1 << 0)) /* spinlock; protects the cv's waiter list */
#define CV_NON_EMPTY ((uint32_t) (1 << 1)) /* waiters list is non-empty */
183 | |
184 | /* ---------- */ |
185 | |
/* Hold a pair of condition function and its argument. */
struct wait_condition_s {
	int (*f) (const void *v); /* condition function; called with argument v */
	const void *v; /* argument passed to *f */
	int (*eq) (const void *a, const void *b); /* optional equality test on arguments; may be NULL (see WAIT_CONDITION_EQ) */
};
192 | |
/* Return whether wait conditions *a_ and *b_ are equal and non-null:
   the condition functions must be identical, and the arguments either the
   same pointer or reported equal by a non-null eq callback. */
#define WAIT_CONDITION_EQ(a_, b_) ((a_)->f != NULL && (a_)->f == (b_)->f && \
				   ((a_)->v == (b_)->v || \
				    ((a_)->eq != NULL && (*(a_)->eq) ((a_)->v, (b_)->v))))

/* If a waiter has waited this many times, it may set the MU_LONG_WAIT bit. */
#define LONG_WAIT_THRESHOLD 30
200 | |
201 | /* ---------- */ |
202 | |
/* Return the absolute deadline implied by note *n_: the zero time if the
   note has already been notified, else its expiry time if it has one, else
   no deadline. */
#define NOTIFIED_TIME(n_) (ATM_LOAD_ACQ (&(n_)->notified) != 0? nsync_time_zero : \
			   (n_)->expiry_time_valid? (n_)->expiry_time : nsync_time_no_deadline)
205 | |
/* A waiter represents a single waiter on a cv or a mu.

   To wait:
   Allocate a waiter struct *w with nsync_waiter_new_ (), set w.waiting=1, and
   w.cv_mu=nil or to the associated mu if waiting on a condition variable, then
   queue w.nsync_dll on some queue, and then wait using:
       while (ATM_LOAD_ACQ (&w.waiting) != 0) { nsync_mu_semaphore_p (&w.sem); }
   Return *w to the freepool by calling nsync_waiter_free_ (w).

   To wakeup:
   Remove *w from the relevant queue then:
    ATM_STORE_REL (&w.waiting, 0);
    nsync_mu_semaphore_v (&w.sem); */
typedef struct {
	uint32_t tag; /* debug DLL_NSYNC_WAITER, DLL_WAITER, DLL_WAITER_SAMECOND */
	nsync_semaphore sem; /* Thread waits on this semaphore. */
	struct nsync_waiter_s nw; /* An embedded nsync_waiter_s. */
	struct nsync_mu_s_ *cv_mu; /* pointer to nsync_mu associated with a cv wait */
	lock_type *l_type; /* Lock type of the mu, or nil if not associated with a mu. */
	nsync_atomic_uint32_ remove_count; /* count of removals from queue */
	struct wait_condition_s cond; /* A condition on which to acquire a mu. */
	nsync_dll_element_ same_condition; /* Links neighbours in nw.q with same non-nil condition. */
	int flags; /* see WAITER_* bits below */
} waiter;

/* Arbitrary magic values stored in the tag fields of waiter and
   nsync_waiter_s.  NOTE(review): presumably checked by the debug variants of
   the DLL_* accessors; confirm in the corresponding .c files. */
static const uint32_t WAITER_TAG = 0x0590239f;
static const uint32_t NSYNC_WAITER_TAG = 0x726d2ba9;
232 | |
/* Bits in waiter.flags. */
#define WAITER_RESERVED 0x1 /* waiter reserved by a thread, even when not in use */
#define WAITER_IN_USE 0x2 /* waiter in use by a thread */

/* Return a pointer to the struct of type t_ that contains the member f_
   pointed to by p_ (the classic container_of idiom). */
#define CONTAINER(t_,f_,p_) ((t_ *) (((char *) (p_)) - offsetof (t_, f_)))
/* Crash if x is false.  NOTE(review): the store through address 0 looks
   deliberate — unlike assert(), it is never compiled out — confirm this is
   the intended always-on behaviour. */
#define ASSERT(x) do { if (!(x)) { *(volatile int *)0 = 0; } } while (0)
238 | |
/* Return a pointer to the nsync_waiter_s containing nsync_dll_element_ *e.
   When NSYNC_DEBUG is set this goes through a checking function; otherwise
   it reads the element's container pointer directly. */
#define DLL_NSYNC_WAITER(e) (NSYNC_DEBUG? nsync_dll_nsync_waiter_ (e) : \
			     ((struct nsync_waiter_s *)((e)->container)))
struct nsync_waiter_s *nsync_dll_nsync_waiter_ (nsync_dll_element_ *e);

/* Return a pointer to the waiter struct that *e is embedded in, where *e is an nw.q field. */
#define DLL_WAITER(e) (NSYNC_DEBUG? nsync_dll_waiter_ (e) : \
		       CONTAINER (waiter, nw, DLL_NSYNC_WAITER(e)))
waiter *nsync_dll_waiter_ (nsync_dll_element_ *e);

/* Return a pointer to the waiter struct that *e is embedded in, where *e is a
   same_condition field. */
#define DLL_WAITER_SAMECOND(e) (NSYNC_DEBUG? nsync_dll_waiter_samecond_ (e) : \
				((waiter *) ((e)->container)))
waiter *nsync_dll_waiter_samecond_ (nsync_dll_element_ *e);
254 | |
/* Return a pointer to an unused waiter struct.
   Ensures that the enclosed timer is stopped and its channel drained.
   The counterpart of nsync_waiter_free_ (). */
waiter *nsync_waiter_new_ (void);

/* Return an unused waiter struct *w to the free pool. */
void nsync_waiter_free_ (waiter *w);
261 | |
262 | /* ---------- */ |
263 | |
/* The internals of an nsync_note.  See internal/note.c for details of locking
   discipline. */
struct nsync_note_s_ {
	nsync_dll_element_ parent_child_link; /* parent's children, under parent->note_mu */
	int expiry_time_valid; /* whether expiry_time is valid; r/o after init */
	nsync_time expiry_time; /* expiry time, if expiry_time_valid != 0; r/o after init */
	nsync_mu note_mu; /* protects fields below except "notified" */
	nsync_cv no_children_cv; /* signalled when children becomes empty */
	uint32_t disconnecting; /* non-zero => node is being disconnected */
	nsync_atomic_uint32_ notified; /* non-zero if the note has been notified */
	struct nsync_note_s_ *parent; /* points to parent, if any */
	nsync_dll_element_ *children; /* list of children */
	nsync_dll_element_ *waiters; /* list of waiters */
};
278 | |
279 | /* ---------- */ |
280 | |
/* Slow-path and internal helpers.  NOTE(review): the one-line summaries below
   are inferred from names and parameter lists; confirm against the
   implementations in the corresponding .c files. */

/* Acquire *mu in mode *l_type using waiter *w; "clear" gives bits to clear. */
void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_type);
/* Release *mu held in mode *l_type, handling waiters. */
void nsync_mu_unlock_slow_ (nsync_mu *mu, lock_type *l_type);
/* Remove element *e from mu_queue and return the resulting list. */
nsync_dll_list_ nsync_remove_from_mu_queue_ (nsync_dll_list_ mu_queue, nsync_dll_element_ *e);
/* Maintain the same_condition links between adjacent queue elements *p and *n. */
void nsync_maybe_merge_conditions_ (nsync_dll_element_ *p, nsync_dll_element_ *n);
/* Return the deadline associated with note n (compare NOTIFIED_TIME). */
nsync_time nsync_note_notified_deadline_ (nsync_note n);
/* Wait on w->sem until abs_deadline, or until cancel_note is notified;
   NOTE(review): confirm the return-value convention in the implementation. */
int nsync_sem_wait_with_cancel_ (waiter *w, nsync_time abs_deadline,
				 nsync_note cancel_note);
288 | NSYNC_CPP_END_ |
289 | |
290 | #endif /*NSYNC_INTERNAL_COMMON_H_*/ |
291 | |