1 | /* Copyright 2016 Google Inc. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. */ |
14 | |
15 | #include "nsync_cpp.h" |
16 | #include "platform.h" |
17 | #include "compiler.h" |
18 | #include "cputype.h" |
19 | #include "nsync.h" |
20 | #include "sem.h" |
21 | #include "dll.h" |
22 | #include "wait_internal.h" |
23 | #include "common.h" |
24 | #include "atomic.h" |
25 | |
26 | NSYNC_CPP_START_ |
27 | |
/* Wait until at least one of waitable[0..count-1] is ready, or until
   abs_deadline is reached, whichever comes first.

   An object is treated as "ready" when its ready_time callback returns a
   time that is not in the future (i.e., <= nsync_time_zero); a positive
   ready_time means "not ready before that time".

   If mu != NULL, the caller is assumed to hold it; it is released via
   (*unlock) (mu) for the duration of any blocking wait and reacquired via
   (*lock) (mu) before returning.

   Returns the index of the first ready object, or count if the deadline
   expired (or was nsync_time_zero) before any object became ready.  */
int nsync_wait_n (void *mu, void (*lock) (void *), void (*unlock) (void *),
		  nsync_time abs_deadline,
		  int count, struct nsync_waitable_s *waitable[]) {
	int ready;
	IGNORE_RACES_START ();
	/* Fast path: scan for an object that is already ready (ready_time
	   not in the future).  On loop exit, ready is either the index of
	   the first ready object, or count if none is ready.  */
	for (ready = 0; ready != count &&
			nsync_time_cmp ((*waitable[ready]->funcs->ready_time) (
						waitable[ready]->v, NULL),
					nsync_time_zero) > 0;
	     ready++) {
	}
	/* Slow path: nothing ready yet, and the caller allows blocking
	   (a zero abs_deadline means "poll only, do not block").  */
	if (ready == count && nsync_time_cmp (abs_deadline, nsync_time_zero) > 0) {
		int i;
		int unlocked = 0;    /* whether mu was released below */
		int j;
		int enqueued = 1;    /* last enqueue succeeded (object still unready) */
		waiter *w = nsync_waiter_new_ ();  /* per-thread waiter with semaphore */
		/* Use a small stack array of wait nodes when possible;
		   fall back to the heap for large counts.  */
		struct nsync_waiter_s nw_set[4];
		struct nsync_waiter_s *nw = nw_set;
		if (count > (int) (sizeof (nw_set) / sizeof (nw_set[0]))) {
			/* NOTE(review): malloc result is not checked; a NULL
			   return would be dereferenced below.  Verify whether
			   the project treats OOM as fatal by convention.  */
			nw = (struct nsync_waiter_s *) malloc (count * sizeof (nw[0]));
		}
		/* Enqueue a wait node on each waitable.  All nodes share the
		   single semaphore &w->sem, so a wakeup on any object wakes
		   this thread.  Stop early if an enqueue reports the object
		   became ready (enqueued == 0).  */
		for (i = 0; i != count && enqueued; i++) {
			nw[i].tag = NSYNC_WAITER_TAG;
			nw[i].sem = &w->sem;
			nsync_dll_init_ (&nw[i].q, &nw[i]);
			ATM_STORE (&nw[i].waiting, 0);
			nw[i].flags = 0;
			enqueued = (*waitable[i]->funcs->enqueue) (waitable[i]->v, &nw[i]);
		}

		/* Block only if every object was successfully enqueued;
		   otherwise some waitable[i-1] is already ready.  */
		if (i == count) {
			nsync_time min_ntime;
			if (mu != NULL) {
				(*unlock) (mu);
				unlocked = 1;
			}
			/* Repeatedly: compute the earliest time at which any
			   object could be ready (capped by abs_deadline), and
			   wait on the shared semaphore until then.  Exit when
			   some object is ready (min_ntime <= zero) or the
			   semaphore wait stops returning 0 (presumably a
			   deadline expiry --- confirm against sem.h).  */
			do {
				min_ntime = abs_deadline;
				for (j = 0; j != count; j++) {
					nsync_time ntime;
					ntime = (*waitable[j]->funcs->ready_time) (
						waitable[j]->v, &nw[j]);
					if (nsync_time_cmp (ntime, min_ntime) < 0) {
						min_ntime = ntime;
					}
				}
			} while (nsync_time_cmp (min_ntime, nsync_time_zero) > 0 &&
				 nsync_mu_semaphore_p_with_deadline (&w->sem,
								     min_ntime) == 0);
		}

		/* An attempt was made above to enqueue waitable[0..i-1].
		   Dequeue any that are still enqueued, and remember the index
		   of the first ready (i.e., not still enqueued) object, if any. */
		for (j = 0; j != i; j++) {
			int was_still_enqueued =
				(*waitable[j]->funcs->dequeue) (waitable[j]->v, &nw[j]);
			if (!was_still_enqueued && ready == count) {
				ready = j;
			}
		}

		/* Release the heap copy of the wait nodes, if one was used.  */
		if (nw != nw_set) {
			free (nw);
		}
		nsync_waiter_free_ (w);
		/* Reacquire the caller's lock before returning, if it was
		   released above.  */
		if (unlocked) {
			(*lock) (mu);
		}
	}
	IGNORE_RACES_END ();
	return (ready);
}
102 | |
103 | NSYNC_CPP_END_ |
104 | |