1 | /* Copyright 2016 Google Inc. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. */ |
14 | |
15 | #include "nsync_cpp.h" |
16 | #include "platform.h" |
17 | #include "compiler.h" |
18 | #include "cputype.h" |
19 | #include "nsync.h" |
20 | #include "dll.h" |
21 | #include "sem.h" |
22 | #include "wait_internal.h" |
23 | #include "common.h" |
24 | #include "atomic.h" |
25 | |
26 | NSYNC_CPP_START_ |
27 | |
/* Initialize *mu.  Zeroes every field, which is the unlocked state with no
   waiters; a zero-filled nsync_mu is therefore also validly initialized. */
void nsync_mu_init (nsync_mu *mu) {
	memset ((void *) mu, 0, sizeof (*mu));
}
32 | |
33 | /* Release the mutex spinlock. */ |
34 | static void mu_release_spinlock (nsync_mu *mu) { |
35 | uint32_t old_word = ATM_LOAD (&mu->word); |
36 | while (!ATM_CAS_REL (&mu->word, old_word, old_word & ~MU_SPINLOCK)) { |
37 | old_word = ATM_LOAD (&mu->word); |
38 | } |
39 | } |
40 | |
/* Lock *mu using the specified lock_type, waiting on *w if necessary.
   "clear" should be zero if the thread has not previously slept on *mu, and
   MU_DESIG_WAKER if it has; this represents bits that nsync_mu_lock_slow_() must clear when
   it either acquires or sleeps on *mu.  The caller owns *w on return; it is in a valid
   state to be returned to the free pool. */
void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_type) {
	uint32_t zero_to_acquire;  /* bits that must be zero to acquire in this mode */
	uint32_t wait_count;
	uint32_t long_wait;
	unsigned attempts = 0; /* attempt count; used for spinloop backoff */
	w->cv_mu = NULL; /* not a cv wait */
	w->cond.f = NULL; /* Not using a conditional critical section. */
	w->cond.v = NULL;
	w->cond.eq = NULL;
	w->l_type = l_type;
	zero_to_acquire = l_type->zero_to_acquire;
	if (clear != 0) {
		/* Only the constraints of mutual exclusion should stop a designated waker. */
		zero_to_acquire &= ~(MU_WRITER_WAITING | MU_LONG_WAIT);
	}
	wait_count = 0; /* number of times we waited, and were woken. */
	long_wait = 0; /* set to MU_LONG_WAIT when wait_count gets large */
	for (;;) {
		uint32_t old_word = ATM_LOAD (&mu->word);
		if ((old_word & zero_to_acquire) == 0) {
			/* lock can be acquired; try to acquire, possibly
			   clearing MU_DESIG_WAKER and MU_LONG_WAIT. */
			if (ATM_CAS_ACQ (&mu->word, old_word,
					 (old_word+l_type->add_to_acquire) &
					 ~(clear|long_wait|l_type->clear_on_acquire))) {
				return;
			}
		} else if ((old_word&MU_SPINLOCK) == 0 &&
			   ATM_CAS_ACQ (&mu->word, old_word,
					(old_word|MU_SPINLOCK|long_wait|
					 l_type->set_when_waiting) & ~(clear | MU_ALL_FALSE))) {

			/* Spinlock is now held, and lock is held by someone
			   else; MU_WAITING has also been set; queue ourselves.
			   There's no need to adjust same_condition here,
			   because w.condition==NULL. */
			ATM_STORE (&w->nw.waiting, 1);
			if (wait_count == 0) {
				/* first wait goes to end of queue */
				mu->waiters = nsync_dll_make_last_in_list_ (mu->waiters,
									    &w->nw.q);
			} else {
				/* subsequent waits go to front of queue */
				mu->waiters = nsync_dll_make_first_in_list_ (mu->waiters,
									     &w->nw.q);
			}

			/* Release spinlock.  Cannot use a store here, because
			   the current thread does not hold the mutex.  If
			   another thread were a designated waker, the mutex
			   holder could be concurrently unlocking, even though
			   we hold the spinlock. */
			mu_release_spinlock (mu);

			/* wait until awoken. */
			while (ATM_LOAD_ACQ (&w->nw.waiting) != 0) { /* acquire load */
				nsync_mu_semaphore_p (&w->sem);
			}
			wait_count++;
			/* If the thread has been woken more than this many
			   times, and still not acquired, it sets the
			   MU_LONG_WAIT bit to prevent threads that have not
			   waited from acquiring.  This is the starvation
			   avoidance mechanism.  The number is fairly high so
			   that we continue to benefit from the throughput of
			   not having running threads wait unless absolutely
			   necessary. */
			if (wait_count == LONG_WAIT_THRESHOLD) { /* repeatedly woken */
				long_wait = MU_LONG_WAIT; /* force others to wait at least once */
			}

			attempts = 0;          /* restart the backoff sequence after sleeping */
			clear = MU_DESIG_WAKER; /* we were woken, so we are now the designated waker */
			/* Threads that have been woken at least once don't care
			   about waiting writers or long waiters. */
			zero_to_acquire &= ~(MU_WRITER_WAITING | MU_LONG_WAIT);
		}
		attempts = nsync_spin_delay_ (attempts);
	}
}
126 | |
127 | /* Attempt to acquire *mu in writer mode without blocking, and return non-zero |
128 | iff successful. Return non-zero with high probability if *mu was free on |
129 | entry. */ |
130 | int nsync_mu_trylock (nsync_mu *mu) { |
131 | int result; |
132 | IGNORE_RACES_START (); |
133 | if (ATM_CAS_ACQ (&mu->word, 0, MU_WADD_TO_ACQUIRE)) { /* acquire CAS */ |
134 | result = 1; |
135 | } else { |
136 | uint32_t old_word = ATM_LOAD (&mu->word); |
137 | result = ((old_word & MU_WZERO_TO_ACQUIRE) == 0 && |
138 | ATM_CAS_ACQ (&mu->word, old_word, |
139 | (old_word + MU_WADD_TO_ACQUIRE) & ~MU_WCLEAR_ON_ACQUIRE)); |
140 | } |
141 | IGNORE_RACES_END (); |
142 | return (result); |
143 | } |
144 | |
145 | /* Block until *mu is free and then acquire it in writer mode. */ |
146 | void nsync_mu_lock (nsync_mu *mu) { |
147 | IGNORE_RACES_START (); |
148 | if (!ATM_CAS_ACQ (&mu->word, 0, MU_WADD_TO_ACQUIRE)) { /* acquire CAS */ |
149 | uint32_t old_word = ATM_LOAD (&mu->word); |
150 | if ((old_word&MU_WZERO_TO_ACQUIRE) != 0 || |
151 | !ATM_CAS_ACQ (&mu->word, old_word, |
152 | (old_word+MU_WADD_TO_ACQUIRE) & ~MU_WCLEAR_ON_ACQUIRE)) { |
153 | waiter *w = nsync_waiter_new_ (); |
154 | nsync_mu_lock_slow_ (mu, w, 0, nsync_writer_type_); |
155 | nsync_waiter_free_ (w); |
156 | } |
157 | } |
158 | IGNORE_RACES_END (); |
159 | } |
160 | |
161 | /* Attempt to acquire *mu in reader mode without blocking, and return non-zero |
162 | iff successful. Returns non-zero with high probability if *mu was free on |
163 | entry. It may fail to acquire if a writer is waiting, to avoid starvation. |
164 | */ |
165 | int nsync_mu_rtrylock (nsync_mu *mu) { |
166 | int result; |
167 | IGNORE_RACES_START (); |
168 | if (ATM_CAS_ACQ (&mu->word, 0, MU_RADD_TO_ACQUIRE)) { /* acquire CAS */ |
169 | result = 1; |
170 | } else { |
171 | uint32_t old_word = ATM_LOAD (&mu->word); |
172 | result = ((old_word&MU_RZERO_TO_ACQUIRE) == 0 && |
173 | ATM_CAS_ACQ (&mu->word, old_word, |
174 | (old_word+MU_RADD_TO_ACQUIRE) & ~MU_RCLEAR_ON_ACQUIRE)); |
175 | } |
176 | IGNORE_RACES_END (); |
177 | return (result); |
178 | } |
179 | |
180 | /* Block until *mu can be acquired in reader mode and then acquire it. */ |
181 | void nsync_mu_rlock (nsync_mu *mu) { |
182 | IGNORE_RACES_START (); |
183 | if (!ATM_CAS_ACQ (&mu->word, 0, MU_RADD_TO_ACQUIRE)) { /* acquire CAS */ |
184 | uint32_t old_word = ATM_LOAD (&mu->word); |
185 | if ((old_word&MU_RZERO_TO_ACQUIRE) != 0 || |
186 | !ATM_CAS_ACQ (&mu->word, old_word, |
187 | (old_word+MU_RADD_TO_ACQUIRE) & ~MU_RCLEAR_ON_ACQUIRE)) { |
188 | waiter *w = nsync_waiter_new_ (); |
189 | nsync_mu_lock_slow_ (mu, w, 0, nsync_reader_type_); |
190 | nsync_waiter_free_ (w); |
191 | } |
192 | } |
193 | IGNORE_RACES_END (); |
194 | } |
195 | |
196 | /* Invoke the condition associated with *p, which is an element of |
197 | a "waiter" list. */ |
198 | static int condition_true (nsync_dll_element_ *p) { |
199 | return ((*DLL_WAITER (p)->cond.f) (DLL_WAITER (p)->cond.v)); |
200 | } |
201 | |
/* If *p is an element of waiter_list (a list of "waiter" structs), return a
   pointer to the next element of the list that has a different condition. */
static nsync_dll_element_ *skip_past_same_condition (
	nsync_dll_list_ waiter_list, nsync_dll_element_ *p) {
	nsync_dll_element_ *next;
	/* The same_condition list is circular, so the element before *p in
	   same_condition order is the last member of *p's group. */
	nsync_dll_element_ *last_with_same_condition =
		&DLL_WAITER_SAMECOND (DLL_WAITER (p)->same_condition.prev)->nw.q;
	if (last_with_same_condition != p && last_with_same_condition != p->prev) {
		/* First in set with same condition, so skip to end. */
		next = nsync_dll_next_ (waiter_list, last_with_same_condition);
	} else {
		/* *p is alone, or is the last of its group; just advance. */
		next = nsync_dll_next_ (waiter_list, p);
	}
	return (next);
}
217 | |
218 | /* Merge the same_condition lists of *p and *n if they have the same non-NULL |
219 | condition. */ |
220 | void nsync_maybe_merge_conditions_ (nsync_dll_element_ *p, nsync_dll_element_ *n) { |
221 | if (p != NULL && n != NULL && |
222 | WAIT_CONDITION_EQ (&DLL_WAITER (p)->cond, &DLL_WAITER (n)->cond)) { |
223 | nsync_dll_splice_after_ (&DLL_WAITER (p)->same_condition, |
224 | &DLL_WAITER (n)->same_condition); |
225 | } |
226 | } |
227 | |
/* Remove element *e from nsync_mu waiter queue mu_queue, fixing
   up the same_condition list by merging the lists on either side if possible.
   Also increment the waiter's remove_count. */
nsync_dll_list_ nsync_remove_from_mu_queue_ (nsync_dll_list_ mu_queue, nsync_dll_element_ *e) {
	/* Record previous and next elements in the original queue. */
	nsync_dll_element_ *prev = e->prev;
	nsync_dll_element_ *next = e->next;
	uint32_t old_value;
	/* Remove. */
	mu_queue = nsync_dll_remove_ (mu_queue, e);
	/* Bump remove_count with a CAS loop; it may be read concurrently. */
	do {
		old_value = ATM_LOAD (&DLL_WAITER (e)->remove_count);
	} while (!ATM_CAS (&DLL_WAITER (e)->remove_count, old_value, old_value+1));
	if (!nsync_dll_is_empty_ (mu_queue)) {
		/* Fix up same_condition. */
		nsync_dll_element_ *e_same_condition = &DLL_WAITER (e)->same_condition;

		if (e_same_condition->next != e_same_condition) {
			/* *e is linked to a same_condition neighbour---just remove it. */
			e_same_condition->next->prev = e_same_condition->prev;
			e_same_condition->prev->next = e_same_condition->next;
			e_same_condition->next = e_same_condition;
			e_same_condition->prev = e_same_condition;
		} else if (prev != nsync_dll_last_ (mu_queue)) {
			/* Merge the new neighbours together if we can. */
			nsync_maybe_merge_conditions_ (prev, next);
		}
	}
	return (mu_queue);
}
258 | |
/* Unlock *mu and wake one or more waiters as appropriate after an unlock.
   It is called with *mu held in mode l_type. */
void nsync_mu_unlock_slow_ (nsync_mu *mu, lock_type *l_type) {
	unsigned attempts = 0; /* attempt count; used for backoff */
	for (;;) {
		uint32_t old_word = ATM_LOAD (&mu->word);
		int testing_conditions = ((old_word & MU_CONDITION) != 0);
		uint32_t early_release_mu = l_type->add_to_acquire;
		uint32_t late_release_mu = 0;
		if (testing_conditions) {
			/* Convert to a writer lock, and release later.
			   - A writer lock is currently needed to test conditions
			     because exclusive access is needed to the list to
			     allow modification.  The spinlock cannot be used
			     to achieve that, because an internal lock should not
			     be held when calling the external predicates.
			   - We must test conditions even though a reader region
			     cannot have made any new ones true because some
			     might have been true before the reader region started.
			     The MU_ALL_FALSE test below shortcuts the case where
			     the conditions are known all to be false. */
			early_release_mu = l_type->add_to_acquire - MU_WLOCK;
			late_release_mu = MU_WLOCK;
		}
		if ((old_word&MU_WAITING) == 0 || (old_word&MU_DESIG_WAKER) != 0 ||
		    (old_word & MU_RLOCK_FIELD) > MU_RLOCK ||
		    (old_word & (MU_RLOCK|MU_ALL_FALSE)) == (MU_RLOCK|MU_ALL_FALSE)) {
			/* no one to wake, there's a designated waker waking
			   up, there are still readers, or it's a reader and all waiters
			   have false conditions */
			if (ATM_CAS_REL (&mu->word, old_word,
					 (old_word - l_type->add_to_acquire) &
					 ~l_type->clear_on_uncontended_release)) {
				return;
			}
		} else if ((old_word&MU_SPINLOCK) == 0 &&
			   ATM_CAS_ACQ (&mu->word, old_word,
					(old_word-early_release_mu)|MU_SPINLOCK|MU_DESIG_WAKER)) {
			nsync_dll_list_ wake;
			lock_type *wake_type;
			uint32_t clear_on_release;
			uint32_t set_on_release;
			/* The spinlock is now held, and we've set the
			   designated wake flag, since we're likely to wake a
			   thread that will become that designated waker.  If
			   there are conditions to check, the mutex itself is
			   still held.  */

			nsync_dll_element_ *p = NULL;
			nsync_dll_element_ *next = NULL;

			/* Swap the entire mu->waiters list into the local
			   "new_waiters" list.  This gives us exclusive access
			   to the list, even if we unlock the spinlock, which
			   we may do if checking conditions.  The loop below
			   will grab more new waiters that arrived while we
			   were checking conditions, and terminates only if no
			   new waiters arrive in one loop iteration. */
			nsync_dll_list_ waiters = NULL;
			nsync_dll_list_ new_waiters = mu->waiters;
			mu->waiters = NULL;

			/* Remove a waiter from the queue, if possible. */
			wake = NULL; /* waiters to wake. */
			wake_type = NULL; /* type of waiter(s) on wake, or NULL if wake is empty. */
			clear_on_release = MU_SPINLOCK;
			set_on_release = MU_ALL_FALSE;
			while (!nsync_dll_is_empty_ (new_waiters)) { /* some new waiters to consider */
				p = nsync_dll_first_ (new_waiters);
				if (testing_conditions) {
					/* Should we continue to test conditions? */
					if (wake_type == nsync_writer_type_) {
						/* No, because we're already waking a writer,
						   and need wake no others.*/
						testing_conditions = 0;
					} else if (wake_type == NULL &&
						DLL_WAITER (p)->l_type != nsync_reader_type_ &&
						DLL_WAITER (p)->cond.f == NULL) {
						/* No, because we've woken no one, but the
						   first waiter is a writer with no condition,
						   so we will certainly wake it, and need wake
						   no others. */
						testing_conditions = 0;
					}
				}
				/* If testing waiters' conditions, release the
				   spinlock while still holding the write lock.
				   This is so that the spinlock is not held
				   while the conditions are evaluated. */
				if (testing_conditions) {
					mu_release_spinlock (mu);
				}

				/* Process the new waiters picked up in this iteration of the
				   "while (!nsync_dll_is_empty_ (new_waiters))" loop,
				   and stop looking when we run out of waiters, or we find
				   a writer to wake up. */
				while (p != NULL && wake_type != nsync_writer_type_) {
					int p_has_condition;
					next = nsync_dll_next_ (new_waiters, p);
					p_has_condition = (DLL_WAITER (p)->cond.f != NULL);
					if (p_has_condition && !testing_conditions) {
						nsync_panic_ ("checking a waiter condition "
							      "while unlocked\n" );
					}
					if (p_has_condition && !condition_true (p)) {
						/* condition is false */
						/* skip to the end of the same_condition group. */
						next = skip_past_same_condition (new_waiters, p);
					} else if (wake_type == NULL ||
						   DLL_WAITER (p)->l_type == nsync_reader_type_) {
						/* Wake this thread. */
						new_waiters = nsync_remove_from_mu_queue_ (
							new_waiters, p);
						wake = nsync_dll_make_last_in_list_ (wake, p);
						wake_type = DLL_WAITER (p)->l_type;
					} else {
						/* Failing to wake a writer
						   that could acquire if it
						   were first. */
						set_on_release |= MU_WRITER_WAITING;
						set_on_release &= ~MU_ALL_FALSE;
					}
					p = next;
				}

				if (p != NULL) {
					/* Didn't search to end of list, so can't be sure
					   all conditions are false. */
					set_on_release &= ~MU_ALL_FALSE;
				}

				/* If testing waiters' conditions, reacquire the spinlock
				   released above. */
				if (testing_conditions) {
					nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK,
								  MU_SPINLOCK, 0);
				}

				/* add the new_waiters to the last of the waiters. */
				nsync_maybe_merge_conditions_ (nsync_dll_last_ (waiters),
							       nsync_dll_first_ (new_waiters));
				waiters = nsync_dll_make_last_in_list_ (waiters,
									nsync_dll_last_ (new_waiters));
				/* Pick up the next set of new waiters. */
				new_waiters = mu->waiters;
				mu->waiters = NULL;
			}

			/* Return the local waiter list to *mu. */
			mu->waiters = waiters;

			if (nsync_dll_is_empty_ (wake)) {
				/* not waking a waiter => no designated waker */
				clear_on_release |= MU_DESIG_WAKER;
			}

			if ((set_on_release & MU_ALL_FALSE) == 0) {
				/* If not explicitly setting MU_ALL_FALSE, clear it. */
				clear_on_release |= MU_ALL_FALSE;
			}

			if (nsync_dll_is_empty_ (mu->waiters)) {
				/* no waiters left */
				clear_on_release |= MU_WAITING | MU_WRITER_WAITING |
						    MU_CONDITION | MU_ALL_FALSE;
			}

			/* Release the spinlock, and possibly the lock if
			   late_release_mu is non-zero.  Other bits are set or
			   cleared according to whether we woke any threads,
			   whether any waiters remain, and whether any of them
			   are writers. */
			old_word = ATM_LOAD (&mu->word);
			while (!ATM_CAS_REL (&mu->word, old_word,
					     ((old_word-late_release_mu)|set_on_release) &
					     ~clear_on_release)) { /* release CAS */
				old_word = ATM_LOAD (&mu->word);
			}
			/* Wake the waiters. */
			for (p = nsync_dll_first_ (wake); p != NULL; p = next) {
				next = nsync_dll_next_ (wake, p);
				wake = nsync_dll_remove_ (wake, p);
				/* Clear "waiting" with release semantics, then post
				   the semaphore the waiter may be sleeping on. */
				ATM_STORE_REL (&DLL_NSYNC_WAITER (p)->waiting, 0);
				nsync_mu_semaphore_v (&DLL_WAITER (p)->sem);
			}
			return;
		}
		attempts = nsync_spin_delay_ (attempts);
	}
}
450 | |
/* Unlock *mu, which must be held in write mode, and wake waiters, if appropriate. */
void nsync_mu_unlock (nsync_mu *mu) {
	IGNORE_RACES_START ();
	/* C is not a garbage-collected language, so we cannot release until we
	   can be sure that we will not have to touch the mutex again to wake a
	   waiter.  Another thread could acquire, decrement a reference count
	   and deallocate the mutex before the current thread touched the mutex
	   word again. */
	if (!ATM_CAS_REL (&mu->word, MU_WLOCK, 0)) { /* fast path: uncontended release */
		uint32_t old_word = ATM_LOAD (&mu->word);
		/* Clear MU_ALL_FALSE because the critical section we're just
		   leaving may have made some conditions true. */
		uint32_t new_word = (old_word - MU_WLOCK) & ~MU_ALL_FALSE;
		/* Sanity check:  mutex must be held in write mode, and there
		   must be no readers. */
		if ((new_word & (MU_RLOCK_FIELD | MU_WLOCK)) != 0) {
			if ((old_word & MU_RLOCK_FIELD) != 0) {
				nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu "
					      "held in read mode\n" );
			} else {
				nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu "
					      "not held in write mode\n" );
			}
		} else if ((old_word & (MU_WAITING|MU_DESIG_WAKER)) == MU_WAITING ||
			   !ATM_CAS_REL (&mu->word, old_word, new_word)) {
			/* There are waiters and no designated waker, or
			   our initial CAS attempt failed, so use slow path. */
			nsync_mu_unlock_slow_ (mu, nsync_writer_type_);
		}
	}
	IGNORE_RACES_END ();
}
483 | |
/* Unlock *mu, which must be held in read mode, and wake waiters, if appropriate. */
void nsync_mu_runlock (nsync_mu *mu) {
	IGNORE_RACES_START ();
	/* See comment in nsync_mu_unlock(). */
	if (!ATM_CAS_REL (&mu->word, MU_RLOCK, 0)) { /* fast path: sole reader, no waiters */
		uint32_t old_word = ATM_LOAD (&mu->word);
		/* Sanity check:  mutex must not be held in write mode and
		   reader count must not be 0.  The XOR with MU_WLOCK makes a
		   single test cover both "write locked" and "no readers". */
		if (((old_word ^ MU_WLOCK) & (MU_WLOCK | MU_RLOCK_FIELD)) == 0) {
			if ((old_word & MU_WLOCK) != 0) {
				nsync_panic_ ("attempt to nsync_mu_runlock() an nsync_mu "
					      "held in write mode\n" );
			} else {
				nsync_panic_ ("attempt to nsync_mu_runlock() an nsync_mu "
					      "not held in read mode\n" );
			}
		} else if ((old_word & (MU_WAITING | MU_DESIG_WAKER)) == MU_WAITING &&
			   (old_word & (MU_RLOCK_FIELD|MU_ALL_FALSE)) == MU_RLOCK) {
			/* There are waiters and no designated waker, the last
			   reader is unlocking, and not all waiters have a
			   false condition.  So we must take the slow path to
			   attempt to wake a waiter. */
			nsync_mu_unlock_slow_ (mu, nsync_reader_type_);
		} else if (!ATM_CAS_REL (&mu->word, old_word, old_word - MU_RLOCK)) {
			/* CAS attempt failed, so take slow path. */
			nsync_mu_unlock_slow_ (mu, nsync_reader_type_);
		}
	}
	IGNORE_RACES_END ();
}
514 | |
515 | /* Abort if *mu is not held in write mode. */ |
516 | void nsync_mu_assert_held (const nsync_mu *mu) { |
517 | IGNORE_RACES_START (); |
518 | if ((ATM_LOAD (&mu->word) & MU_WHELD_IF_NON_ZERO) == 0) { |
519 | nsync_panic_ ("nsync_mu not held in write mode\n" ); |
520 | } |
521 | IGNORE_RACES_END (); |
522 | } |
523 | |
524 | /* Abort if *mu is not held in read or write mode. */ |
525 | void nsync_mu_rassert_held (const nsync_mu *mu) { |
526 | IGNORE_RACES_START (); |
527 | if ((ATM_LOAD (&mu->word) & MU_ANY_LOCK) == 0) { |
528 | nsync_panic_ ("nsync_mu not held in some mode\n" ); |
529 | } |
530 | IGNORE_RACES_END (); |
531 | } |
532 | |
533 | /* Return whether *mu is held in read mode. |
534 | Requires that *mu is held in some mode. */ |
535 | int nsync_mu_is_reader (const nsync_mu *mu) { |
536 | uint32_t word; |
537 | IGNORE_RACES_START (); |
538 | word = ATM_LOAD (&mu->word); |
539 | if ((word & MU_ANY_LOCK) == 0) { |
540 | nsync_panic_ ("nsync_mu not held in some mode\n" ); |
541 | } |
542 | IGNORE_RACES_END (); |
543 | return ((word & MU_WLOCK) == 0); |
544 | } |
545 | |
546 | NSYNC_CPP_END_ |
547 | |