/proc/self/cwd/external/nsync/internal/cv.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright 2016 Google Inc. |
2 | | |
3 | | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | you may not use this file except in compliance with the License. |
5 | | You may obtain a copy of the License at |
6 | | |
7 | | http://www.apache.org/licenses/LICENSE-2.0 |
8 | | |
9 | | Unless required by applicable law or agreed to in writing, software |
10 | | distributed under the License is distributed on an "AS IS" BASIS, |
11 | | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | See the License for the specific language governing permissions and |
13 | | limitations under the License. */ |
14 | | |
15 | | #include "nsync_cpp.h" |
16 | | #include "platform.h" |
17 | | #include "compiler.h" |
18 | | #include "cputype.h" |
19 | | #include "nsync.h" |
20 | | #include "dll.h" |
21 | | #include "sem.h" |
22 | | #include "wait_internal.h" |
23 | | #include "common.h" |
24 | | #include "atomic.h" |
25 | | |
26 | | NSYNC_CPP_START_ |
27 | | |
28 | | /* Initialize *cv. */ |
29 | 29.2k | void nsync_cv_init (nsync_cv *cv) { |
30 | 29.2k | memset ((void *) cv, 0, sizeof (*cv)); |
31 | 29.2k | } |
32 | | |
/* Wake the cv waiters in the circular list pointed to by
   to_wake_list, which may not be NULL.  If the waiter is associated with a
   nsync_mu, the "wakeup" may consist of transferring the waiters to the nsync_mu's
   queue.  Requires that every waiter is associated with the same mutex.
   all_readers indicates whether all the waiters on the list are readers. */
static void wake_waiters (nsync_dll_list_ to_wake_list, int all_readers) {
	nsync_dll_element_ *p = NULL;
	nsync_dll_element_ *next = NULL;
	nsync_dll_element_ *first_waiter = nsync_dll_first_ (to_wake_list);
	struct nsync_waiter_s *first_nw = DLL_NSYNC_WAITER (first_waiter);
	waiter *first_w = NULL;
	nsync_mu *pmu = NULL;
	/* Only waiters flagged MUCV carry a full "waiter" struct that can
	   record an associated nsync_mu; others are simply woken below. */
	if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
		first_w = DLL_WAITER (first_waiter);
		pmu = first_w->cv_mu;
	}
	if (pmu != NULL) { /* waiter is associated with the nsync_mu *pmu. */
		/* We will transfer elements of to_wake_list to *pmu if all of:
		   - some thread holds the lock, and
		   - *pmu's spinlock is not held, and
		   - either *pmu cannot be acquired in the mode of the first
		     waiter, or there's more than one thread on to_wake_list
		     and not all are readers, and
		   - we acquire the spinlock on the first try.
		   The spinlock acquisition also marks *pmu as having waiters.
		   The requirement that some thread holds the lock ensures
		   that at least one of the transferred waiters will be woken.
		   */
		uint32_t old_mu_word = ATM_LOAD (&pmu->word);
		int first_cant_acquire = ((old_mu_word & first_w->l_type->zero_to_acquire) != 0);
		next = nsync_dll_next_ (to_wake_list, first_waiter);
		if ((old_mu_word&MU_ANY_LOCK) != 0 &&
		    (old_mu_word&MU_SPINLOCK) == 0 &&
		    (first_cant_acquire || (next != NULL && !all_readers)) &&
		    ATM_CAS_ACQ (&pmu->word, old_mu_word,
				 (old_mu_word|MU_SPINLOCK|MU_WAITING) &
				 ~MU_ALL_FALSE)) {

			/* Extra mu word bits to set when releasing the spinlock. */
			uint32_t set_on_release = 0;

			/* For any waiter that should be transferred, rather
			   than woken, move it from to_wake_list to pmu->waiters. */
			int first_is_writer = first_w->l_type == nsync_writer_type_;
			int transferred_a_writer = 0;
			int woke_areader = 0;
			/* Transfer the first waiter iff it can't acquire *pmu. */
			if (first_cant_acquire) {
				to_wake_list = nsync_dll_remove_ (to_wake_list, first_waiter);
				pmu->waiters = nsync_dll_make_last_in_list_ (pmu->waiters, first_waiter);
				/* tell nsync_cv_wait_with_deadline() that we
				   moved the waiter to *pmu's queue. */
				first_w->cv_mu = NULL;
				/* first_nw.waiting is already 1, from being on
				   cv's waiter queue. */
				transferred_a_writer = first_is_writer;
			} else {
				woke_areader = !first_is_writer;
			}
			/* Now process the other waiters. */
			for (p = next; p != NULL; p = next) {
				int p_is_writer;
				struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
				waiter *p_w = NULL;
				if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
					p_w = DLL_WAITER (p);
				}
				next = nsync_dll_next_ (to_wake_list, p);
				p_is_writer = (p_w != NULL &&
					       DLL_WAITER (p)->l_type == nsync_writer_type_);
				/* We transfer this element if any of:
				   - the first waiter can't acquire *pmu, or
				   - the first waiter is a writer, or
				   - this element is a writer. */
				if (p_w == NULL) {
					/* wake non-native waiter */
				} else if (first_cant_acquire || first_is_writer || p_is_writer) {
					to_wake_list = nsync_dll_remove_ (to_wake_list, p);
					pmu->waiters = nsync_dll_make_last_in_list_ (pmu->waiters, p);
					/* tell nsync_cv_wait_with_deadline()
					   that we moved the waiter to *pmu's
					   queue. */
					p_w->cv_mu = NULL;
					/* p_nw->waiting is already 1, from
					   being on cv's waiter queue. */
					transferred_a_writer = transferred_a_writer || p_is_writer;
				} else {
					woke_areader = woke_areader || !p_is_writer;
				}
			}

			/* Claim a waiting writer if we transferred one, except if we woke readers,
			   in which case we want those readers to be able to acquire immediately. */
			if (transferred_a_writer && !woke_areader) {
				set_on_release |= MU_WRITER_WAITING;
			}

			/* release *pmu's spinlock (MU_WAITING was set by CAS above) */
			old_mu_word = ATM_LOAD (&pmu->word);
			while (!ATM_CAS_REL (&pmu->word, old_mu_word,
					     (old_mu_word|set_on_release) & ~MU_SPINLOCK)) {
				old_mu_word = ATM_LOAD (&pmu->word);
			}
		}
	}

	/* Wake any waiters we didn't manage to enqueue on the mu. */
	for (p = nsync_dll_first_ (to_wake_list); p != NULL; p = next) {
		struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
		next = nsync_dll_next_ (to_wake_list, p);
		to_wake_list = nsync_dll_remove_ (to_wake_list, p);
		/* Wake the waiter: clear its waiting flag (release store, so the
		   queue manipulations above are visible to it), then post its
		   semaphore. */
		ATM_STORE_REL (&p_nw->waiting, 0); /* release store */
		nsync_mu_semaphore_v (p_nw->sem);
	}
}
148 | | |
149 | | /* ------------------------------------------ */ |
150 | | |
151 | | /* Versions of nsync_mu_lock() and nsync_mu_unlock() that take "void *" |
152 | | arguments, to avoid call through a function pointer of a different type, |
153 | | which is undefined. */ |
154 | 0 | static void void_mu_lock (void *mu) { |
155 | 0 | nsync_mu_lock ((nsync_mu *) mu); |
156 | 0 | } |
157 | 0 | static void void_mu_unlock (void *mu) { |
158 | 0 | nsync_mu_unlock ((nsync_mu *) mu); |
159 | 0 | } |
160 | | |
/* Atomically release *pmu (which must be held on entry)
   and block the calling thread on *pcv.  Then wait until awakened by a
   call to nsync_cv_signal() or nsync_cv_broadcast() (or a spurious wakeup), or by the time
   reaching abs_deadline, or by cancel_note being notified.  In all cases,
   reacquire *pmu, and return the reason for the call returned (0, ETIMEDOUT,
   or ECANCELED).  Callers should use abs_deadline==nsync_time_no_deadline for no
   deadline, and cancel_note==NULL for no cancellation.  nsync_cv_wait_with_deadline()
   should be used in a loop, as with all Mesa-style condition variables.  See
   examples above.

   There are two reasons for using an absolute deadline, rather than a relative
   timeout---these are why pthread_cond_timedwait() also uses an absolute
   deadline.  First, condition variable waits have to be used in a loop; with
   an absolute time, the deadline does not have to be recomputed on each
   iteration.  Second, in most real programmes, some activity (such as an RPC
   to a server, or when guaranteeing response time in a UI), there is a
   deadline imposed by the specification or the caller/user; relative delays
   can shift arbitrarily with scheduling delays, and so after multiple waits
   might extend beyond the expected deadline.  Relative delays tend to be more
   convenient mostly in tests and trivial examples than they are in real
   programmes. */
int nsync_cv_wait_with_deadline_generic (nsync_cv *pcv, void *pmu,
					 void (*lock) (void *), void (*unlock) (void *),
					 nsync_time abs_deadline,
					 nsync_note cancel_note) {
	nsync_mu *cv_mu = NULL;		/* non-NULL iff *pmu is known to be an nsync_mu */
	int is_reader_mu;
	uint32_t old_word;
	uint32_t remove_count;		/* snapshot of w->remove_count while queued */
	int sem_outcome;		/* result of the semaphore wait: 0, ETIMEDOUT, or ECANCELED */
	unsigned attempts;		/* spin-delay state for the wakeup race below */
	int outcome = 0;
	waiter *w;
	IGNORE_RACES_START ();
	w = nsync_waiter_new_ ();
	ATM_STORE (&w->nw.waiting, 1);
	w->cond.f = NULL; /* Not using a conditional critical section. */
	w->cond.v = NULL;
	w->cond.eq = NULL;
	/* Detect whether the caller's lock routine is one of the known nsync_mu
	   entry points; if so, wakeups may transfer this waiter straight to the
	   mutex's queue. */
	if (lock == &void_mu_lock ||
	    lock == (void (*) (void *)) &nsync_mu_lock ||
	    lock == (void (*) (void *)) &nsync_mu_rlock) {
		cv_mu = (nsync_mu *) pmu;
	}
	w->cv_mu = cv_mu;  /* If *pmu is an nsync_mu, record its address, else record NULL. */
	is_reader_mu = 0; /* If true, an nsync_mu in reader mode. */
	if (cv_mu == NULL) {
		w->l_type = NULL;
	} else {
		/* Record whether *pmu is held in read or write mode, so it can
		   be reacquired in the same mode on wakeup; panic if the hold
		   state is inconsistent with a cv wait. */
		uint32_t old_mu_word = ATM_LOAD (&cv_mu->word);
		int is_writer = (old_mu_word & MU_WHELD_IF_NON_ZERO) != 0;
		int is_reader = (old_mu_word & MU_RHELD_IF_NON_ZERO) != 0;
		if (is_writer) {
			if (is_reader) {
				nsync_panic_ ("mu held in reader and writer mode simultaneously "
					      "on entry to nsync_cv_wait_with_deadline()\n");
			}
			w->l_type = nsync_writer_type_;
		} else if (is_reader) {
			w->l_type = nsync_reader_type_;
			is_reader_mu = 1;
		} else {
			nsync_panic_ ("mu not held on entry to nsync_cv_wait_with_deadline()\n");
		}
	}

	/* acquire spinlock, set non-empty */
	old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK|CV_NON_EMPTY, 0);
	pcv->waiters = nsync_dll_make_last_in_list_ (pcv->waiters, &w->nw.q);
	remove_count = ATM_LOAD (&w->remove_count);
	/* Release the spin lock. */
	ATM_STORE_REL (&pcv->word, old_word|CV_NON_EMPTY); /* release store */

	/* Release *pmu. */
	if (is_reader_mu) {
		nsync_mu_runlock (cv_mu);
	} else {
		(*unlock) (pmu);
	}

	/* wait until awoken or a timeout. */
	sem_outcome = 0;
	attempts = 0;
	while (ATM_LOAD_ACQ (&w->nw.waiting) != 0) { /* acquire load */
		if (sem_outcome == 0) {
			sem_outcome = nsync_sem_wait_with_cancel_ (w, abs_deadline, cancel_note);
		}

		if (sem_outcome != 0 && ATM_LOAD (&w->nw.waiting) != 0) {
			/* A timeout or cancellation occurred, and no wakeup.
			   Acquire *pcv's spinlock, and confirm. */
			old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK,
							     CV_SPINLOCK, 0);
			/* Check that w wasn't removed from the queue after we
			   checked above, but before we acquired the spinlock.
			   The test of remove_count confirms that the waiter *w
			   is still governed by *pcv's spinlock; otherwise, some
			   other thread is about to set w.waiting==0. */
			if (ATM_LOAD (&w->nw.waiting) != 0) {
				if (remove_count == ATM_LOAD (&w->remove_count)) {
					uint32_t old_value;
					/* still in cv waiter queue */
					/* Not woken, so remove *w from cv
					   queue, and declare a
					   timeout/cancellation. */
					outcome = sem_outcome;
					pcv->waiters = nsync_dll_remove_ (pcv->waiters,
									  &w->nw.q);
					/* Bump remove_count so a concurrent
					   signaller knows *w left the queue. */
					do {
						old_value = ATM_LOAD (&w->remove_count);
					} while (!ATM_CAS (&w->remove_count, old_value, old_value+1));
					if (nsync_dll_is_empty_ (pcv->waiters)) {
						old_word &= ~(CV_NON_EMPTY);
					}
					ATM_STORE_REL (&w->nw.waiting, 0); /* release store */
				}
			}
			/* Release spinlock. */
			ATM_STORE_REL (&pcv->word, old_word); /* release store */
		}

		if (ATM_LOAD (&w->nw.waiting) != 0) {
			/* The delay here causes this thread ultimately to
			   yield to another that has dequeued this thread, but
			   has not yet set the waiting field to zero; a
			   cancellation or timeout may prevent this thread
			   from blocking above on the semaphore. */
			attempts = nsync_spin_delay_ (attempts);
		}
	}

	if (cv_mu != NULL && w->cv_mu == NULL) { /* waiter was moved to *pmu's queue, and woken. */
		/* Requeue on *pmu using existing waiter struct; current thread
		   is the designated waker. */
		nsync_mu_lock_slow_ (cv_mu, w, MU_DESIG_WAKER, w->l_type);
		RWLOCK_TRYACQUIRE (1, cv_mu, w->l_type == nsync_writer_type_);
		nsync_waiter_free_ (w);
	} else {
		/* Traditional case: We've woken from the cv, and need to reacquire *pmu. */
		nsync_waiter_free_ (w);
		if (is_reader_mu) {
			nsync_mu_rlock (cv_mu);
		} else {
			(*lock) (pmu);
		}
	}
	IGNORE_RACES_END ();
	return (outcome);
}
310 | | |
/* Wake at least one thread if any are currently blocked on *pcv.  If
   the chosen thread is a reader on an nsync_mu, wake all readers and, if
   possible, a writer. */
void nsync_cv_signal (nsync_cv *pcv) {
	IGNORE_RACES_START ();
	if ((ATM_LOAD_ACQ (&pcv->word) & CV_NON_EMPTY) != 0) { /* acquire load */
		nsync_dll_list_ to_wake_list = NULL; /* waiters that we will wake */
		int all_readers = 0;
		/* acquire spinlock */
		uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK,
							      CV_SPINLOCK, 0);
		if (!nsync_dll_is_empty_ (pcv->waiters)) {
			/* Point to first waiter that enqueued itself, and
			   detach it from all others. */
			struct nsync_waiter_s *first_nw;
			nsync_dll_element_ *first = nsync_dll_first_ (pcv->waiters);
			pcv->waiters = nsync_dll_remove_ (pcv->waiters, first);
			first_nw = DLL_NSYNC_WAITER (first);
			/* Bump the waiter's remove_count so a timing-out waiter
			   can tell it is no longer governed by *pcv's spinlock. */
			if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
				uint32_t old_value;
				do {
					old_value =
						ATM_LOAD (&DLL_WAITER (first)->remove_count);
				} while (!ATM_CAS (&DLL_WAITER (first)->remove_count,
						   old_value, old_value+1));
			}
			to_wake_list = nsync_dll_make_last_in_list_ (to_wake_list, first);
			if ((first_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
			    DLL_WAITER (first)->l_type == nsync_reader_type_) {
				int woke_writer;
				/* If the first waiter is a reader, wake all readers, and
				   if it's possible, one writer.  This allows reader-regions
				   to be added to a monitor without invalidating code in which
				   a client has optimized broadcast calls by converting them to
				   signal calls.  In particular, we wake a writer when waking
				   readers because the readers will not invalidate the condition
				   that motivated the client to call nsync_cv_signal().  But we
				   wake at most one writer because the first writer may invalidate
				   the condition; the client is expecting only one writer to be
				   able make use of the wakeup, or he would have called
				   nsync_cv_broadcast(). */
				nsync_dll_element_ *p = NULL;
				nsync_dll_element_ *next = NULL;
				all_readers = 1;
				woke_writer = 0;
				for (p = nsync_dll_first_ (pcv->waiters); p != NULL; p = next) {
					struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
					int should_wake;
					next = nsync_dll_next_ (pcv->waiters, p);
					should_wake = 0;
					if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
					     DLL_WAITER (p)->l_type == nsync_reader_type_) {
						should_wake = 1;
					} else if (!woke_writer) {
						/* First non-reader encountered: wake it,
						   but no further writers. */
						woke_writer = 1;
						all_readers = 0;
						should_wake = 1;
					}
					if (should_wake) {
						pcv->waiters = nsync_dll_remove_ (pcv->waiters, p);
						if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
							uint32_t old_value;
							do {
								old_value = ATM_LOAD (
								    &DLL_WAITER (p)->remove_count);
							} while (!ATM_CAS (&DLL_WAITER (p)->remove_count,
									   old_value, old_value+1));
						}
						to_wake_list = nsync_dll_make_last_in_list_ (
							to_wake_list, p);
					}
				}
			}
			if (nsync_dll_is_empty_ (pcv->waiters)) {
				old_word &= ~(CV_NON_EMPTY);
			}
		}
		/* Release spinlock. */
		ATM_STORE_REL (&pcv->word, old_word); /* release store */
		if (!nsync_dll_is_empty_ (to_wake_list)) {
			wake_waiters (to_wake_list, all_readers);
		}
	}
	IGNORE_RACES_END ();
}
396 | | |
/* Wake all threads currently blocked on *pcv. */
void nsync_cv_broadcast (nsync_cv *pcv) {
	IGNORE_RACES_START ();
	if ((ATM_LOAD_ACQ (&pcv->word) & CV_NON_EMPTY) != 0) { /* acquire load */
		nsync_dll_element_ *p;
		nsync_dll_element_ *next;
		int all_readers;	/* whether every waiter moved is a reader */
		nsync_dll_list_ to_wake_list = NULL; /* waiters that we will wake */
		/* acquire spinlock */
		nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
		p = NULL;
		next = NULL;
		all_readers = 1;
		/* Wake entire waiter list, which we leave empty. */
		for (p = nsync_dll_first_ (pcv->waiters); p != NULL; p = next) {
			struct nsync_waiter_s *p_nw = DLL_NSYNC_WAITER (p);
			next = nsync_dll_next_ (pcv->waiters, p);
			all_readers = all_readers && (p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0 &&
				      (DLL_WAITER (p)->l_type == nsync_reader_type_);
			pcv->waiters = nsync_dll_remove_ (pcv->waiters, p);
			/* Bump the waiter's remove_count so a timing-out waiter
			   can tell it is no longer governed by *pcv's spinlock. */
			if ((p_nw->flags & NSYNC_WAITER_FLAG_MUCV) != 0) {
				uint32_t old_value;
				do {
					old_value = ATM_LOAD (&DLL_WAITER (p)->remove_count);
				} while (!ATM_CAS (&DLL_WAITER (p)->remove_count,
						   old_value, old_value+1));
			}
			to_wake_list = nsync_dll_make_last_in_list_ (to_wake_list, p);
		}
		/* Release spinlock and mark queue empty. */
		ATM_STORE_REL (&pcv->word, 0); /* release store */
		if (!nsync_dll_is_empty_ (to_wake_list)) { /* Wake them. */
			wake_waiters (to_wake_list, all_readers);
		}
	}
	IGNORE_RACES_END ();
}
434 | | |
435 | | /* Wait with deadline, using an nsync_mu. */ |
436 | | int nsync_cv_wait_with_deadline (nsync_cv *pcv, nsync_mu *pmu, |
437 | | nsync_time abs_deadline, |
438 | 0 | nsync_note cancel_note) { |
439 | 0 | return (nsync_cv_wait_with_deadline_generic (pcv, pmu, &void_mu_lock, |
440 | 0 | &void_mu_unlock, |
441 | 0 | abs_deadline, cancel_note)); |
442 | 0 | } |
443 | | |
444 | | /* Atomically release *pmu and block the caller on *pcv. Wait |
445 | | until awakened by a call to nsync_cv_signal() or nsync_cv_broadcast(), or a spurious |
446 | | wakeup. Then reacquires *pmu, and return. The call is equivalent to a call |
447 | | to nsync_cv_wait_with_deadline() with abs_deadline==nsync_time_no_deadline, and a NULL |
448 | | cancel_note. It should be used in a loop, as with all standard Mesa-style |
449 | | condition variables. See examples above. */ |
450 | 0 | void nsync_cv_wait (nsync_cv *pcv, nsync_mu *pmu) { |
451 | 0 | nsync_cv_wait_with_deadline (pcv, pmu, nsync_time_no_deadline, NULL); |
452 | 0 | } |
453 | | |
454 | 0 | static nsync_time cv_ready_time (void *v UNUSED, struct nsync_waiter_s *nw) { |
455 | 0 | nsync_time r; |
456 | 0 | r = (nw == NULL || ATM_LOAD_ACQ (&nw->waiting) != 0? nsync_time_no_deadline : nsync_time_zero); |
457 | 0 | return (r); |
458 | 0 | } |
459 | | |
/* Generic-waitable "enqueue" callback for an nsync_cv: append *nw to the
   waiter queue of the nsync_cv *v and mark it waiting.  Always returns 1
   (the waiter was enqueued). */
static int cv_enqueue (void *v, struct nsync_waiter_s *nw) {
	nsync_cv *pcv = (nsync_cv *) v;
	/* acquire spinlock */
	uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
	pcv->waiters = nsync_dll_make_last_in_list_ (pcv->waiters, &nw->q);
	/* The waiting flag must be set while the spinlock is held, before the
	   queue becomes visible via the release store below. */
	ATM_STORE (&nw->waiting, 1);
	/* Release spinlock. */
	ATM_STORE_REL (&pcv->word, old_word | CV_NON_EMPTY); /* release store */
	return (1);
}
470 | | |
/* Generic-waitable "dequeue" callback for an nsync_cv: remove *nw from the
   waiter queue of the nsync_cv *v if it is still enqueued (i.e. has not
   already been woken).  Returns non-zero iff *nw was still queued. */
static int cv_dequeue (void *v, struct nsync_waiter_s *nw) {
	nsync_cv *pcv = (nsync_cv *) v;
	int was_queued = 0;
	/* acquire spinlock */
	uint32_t old_word = nsync_spin_test_and_set_ (&pcv->word, CV_SPINLOCK, CV_SPINLOCK, 0);
	/* A non-zero waiting flag (checked under the spinlock) means *nw is
	   still on the queue; a wakeup would have cleared it. */
	if (ATM_LOAD_ACQ (&nw->waiting) != 0) {
		pcv->waiters = nsync_dll_remove_ (pcv->waiters, &nw->q);
		ATM_STORE (&nw->waiting, 0);
		was_queued = 1;
	}
	if (nsync_dll_is_empty_ (pcv->waiters)) {
		old_word &= ~(CV_NON_EMPTY);
	}
	/* Release spinlock. */
	ATM_STORE_REL (&pcv->word, old_word); /* release store */
	return (was_queued);
}
488 | | |
/* Callback table that lets an nsync_cv participate in the generic
   waitable mechanism (see wait_internal.h): ready-time probe, enqueue,
   and dequeue operations defined above. */
const struct nsync_waitable_funcs_s nsync_cv_waitable_funcs = {
	&cv_ready_time,
	&cv_enqueue,
	&cv_dequeue
};
494 | | |
495 | | NSYNC_CPP_END_ |