/proc/self/cwd/external/nsync/internal/mu.c
Line | Count | Source |
1 | | /* Copyright 2016 Google Inc. |
2 | | |
3 | | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | you may not use this file except in compliance with the License. |
5 | | You may obtain a copy of the License at |
6 | | |
7 | | http://www.apache.org/licenses/LICENSE-2.0 |
8 | | |
9 | | Unless required by applicable law or agreed to in writing, software |
10 | | distributed under the License is distributed on an "AS IS" BASIS, |
11 | | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | See the License for the specific language governing permissions and |
13 | | limitations under the License. */ |
14 | | |
15 | | #include "nsync_cpp.h" |
16 | | #include "platform.h" |
17 | | #include "compiler.h" |
18 | | #include "cputype.h" |
19 | | #include "nsync.h" |
20 | | #include "dll.h" |
21 | | #include "sem.h" |
22 | | #include "wait_internal.h" |
23 | | #include "common.h" |
24 | | #include "atomic.h" |
25 | | |
26 | | NSYNC_CPP_START_ |
27 | | |
28 | | /* Initialize *mu. */ |
29 | 1.14M | void nsync_mu_init (nsync_mu *mu) { |
30 | 1.14M | memset ((void *) mu, 0, sizeof (*mu)); |
31 | 1.14M | RWLOCK_CREATE (mu); |
32 | 1.14M | } |
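
/* Illustrative usage sketch, not part of this file: the public API backed
   by this implementation, assuming the public "nsync_mu.h" header. A
   static nsync_mu may instead be initialized with NSYNC_MU_INIT, making
   the nsync_mu_init() call unnecessary. Names below are hypothetical. */
#include "nsync_mu.h"

static nsync_mu counter_mu = NSYNC_MU_INIT;   /* static initialization */
static int counter;                           /* guarded by counter_mu */

static void counter_inc (void) {
        nsync_mu_lock (&counter_mu);          /* exclusive (writer) mode */
        counter++;
        nsync_mu_unlock (&counter_mu);
}
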
33 | | |
34 | | /* Release the mutex spinlock. */ |
35 | 1.00k | static void mu_release_spinlock (nsync_mu *mu) { |
36 | 1.00k | uint32_t old_word = ATM_LOAD (&mu->word); |
37 | 1.00k | while (!ATM_CAS_REL (&mu->word, old_word, old_word & ~MU_SPINLOCK)) { |
38 | 1 | old_word = ATM_LOAD (&mu->word); |
39 | 1 | } |
40 | 1.00k | } |
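
/* A minimal sketch of the same release pattern using C11 atomics, with
   hypothetical names: one flag bit is cleared with a release-CAS loop
   rather than a plain store, because other bits of the word may change
   concurrently and a blind store would overwrite those updates. */
#include <stdatomic.h>
#include <stdint.h>

#define SKETCH_SPINLOCK ((uint32_t) 1)        /* hypothetical flag bit */

static void sketch_release_spinlock (_Atomic uint32_t *word) {
        uint32_t old_word = atomic_load (word);
        while (!atomic_compare_exchange_weak_explicit (
                        word, &old_word, old_word & ~SKETCH_SPINLOCK,
                        memory_order_release, memory_order_relaxed)) {
                /* On failure, old_word was refreshed by the CAS; retry. */
        }
}
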
41 | | |
42 | | /* Lock *mu using the specified lock_type, waiting on *w if necessary. |
43 | | "clear" should be zero if the thread has not previously slept on *mu, and |
44 | | MU_DESIG_WAKER if it has; this represents bits that nsync_mu_lock_slow_() must clear when |
45 | | it either acquires or sleeps on *mu. The caller owns *w on return; it is in a valid |
46 | | state to be returned to the free pool. */ |
47 | 1.18k | void nsync_mu_lock_slow_ (nsync_mu *mu, waiter *w, uint32_t clear, lock_type *l_type) { |
48 | 1.18k | uint32_t zero_to_acquire; |
49 | 1.18k | uint32_t wait_count; |
50 | 1.18k | uint32_t long_wait; |
51 | 1.18k | unsigned attempts = 0; /* attempt count; used for spinloop backoff */ |
52 | 1.18k | w->cv_mu = NULL; /* not a cv wait */ |
53 | 1.18k | w->cond.f = NULL; /* Not using a conditional critical section. */ |
54 | 1.18k | w->cond.v = NULL; |
55 | 1.18k | w->cond.eq = NULL; |
56 | 1.18k | w->l_type = l_type; |
57 | 1.18k | zero_to_acquire = l_type->zero_to_acquire; |
58 | 1.18k | if (clear != 0) { |
59 | | /* Only the constraints of mutual exclusion should stop a designated waker. */ |
60 | 0 | zero_to_acquire &= ~(MU_WRITER_WAITING | MU_LONG_WAIT); |
61 | 0 | } |
62 | 1.18k | wait_count = 0; /* number of times we waited, and were woken. */ |
63 | 1.18k | long_wait = 0; /* set to MU_LONG_WAIT when wait_count gets large */ |
64 | 2.38k | for (;;) { |
65 | 2.38k | uint32_t old_word = ATM_LOAD (&mu->word); |
66 | 2.38k | if ((old_word & zero_to_acquire) == 0) { |
67 | | /* lock can be acquired; try to acquire, possibly |
68 | | clearing MU_DESIG_WAKER and MU_LONG_WAIT. */ |
69 | 1.19k | if (ATM_CAS_ACQ (&mu->word, old_word, |
70 | 1.19k | (old_word+l_type->add_to_acquire) & |
71 | 1.19k | ~(clear|long_wait|l_type->clear_on_acquire))) { |
72 | 1.18k | return; |
73 | 1.18k | } |
74 | 1.19k | } else if ((old_word&MU_SPINLOCK) == 0 && |
75 | 1.19k | ATM_CAS_ACQ (&mu->word, old_word, |
76 | 1.19k | (old_word|MU_SPINLOCK|long_wait| |
77 | 1.19k | l_type->set_when_waiting) & ~(clear | MU_ALL_FALSE))) { |
78 | | |
79 | | /* Spinlock is now held, and lock is held by someone |
80 | | else; MU_WAITING has also been set; queue ourselves. |
81 | | There's no need to adjust same_condition here, |
82 | | because w->cond.f==NULL. */ |
83 | 1.00k | ATM_STORE (&w->nw.waiting, 1); |
84 | 1.00k | if (wait_count == 0) { |
85 | | /* first wait goes to end of queue */ |
86 | 730 | mu->waiters = nsync_dll_make_last_in_list_ (mu->waiters, |
87 | 730 | &w->nw.q); |
88 | 730 | } else { |
89 | | /* subsequent waits go to front of queue */ |
90 | 273 | mu->waiters = nsync_dll_make_first_in_list_ (mu->waiters, |
91 | 273 | &w->nw.q); |
92 | 273 | } |
93 | | |
94 | | /* Release spinlock. Cannot use a store here, because |
95 | | the current thread does not hold the mutex. If |
96 | | another thread were a designated waker, the mutex |
97 | | holder could be concurrently unlocking, even though |
98 | | we hold the spinlock. */ |
99 | 1.00k | mu_release_spinlock (mu); |
100 | | |
101 | | /* wait until awoken. */ |
102 | 2.00k | while (ATM_LOAD_ACQ (&w->nw.waiting) != 0) { /* acquire load */ |
103 | 1.00k | nsync_mu_semaphore_p (&w->sem); |
104 | 1.00k | } |
105 | 1.00k | wait_count++; |
106 | | /* If the thread has been woken this many times |
107 | | and still has not acquired, it sets the |
108 | | MU_LONG_WAIT bit to prevent threads that have not |
109 | | waited from acquiring. This is the starvation |
110 | | avoidance mechanism. The number is fairly high so |
111 | | that we continue to benefit from the throughput of |
112 | | not having running threads wait unless absolutely |
113 | | necessary. */ |
114 | 1.00k | if (wait_count == LONG_WAIT_THRESHOLD) { /* repeatedly woken */ |
115 | 0 | long_wait = MU_LONG_WAIT; /* force others to wait at least once */ |
116 | 0 | } |
117 | | |
118 | 1.00k | attempts = 0; |
119 | 1.00k | clear = MU_DESIG_WAKER; |
120 | | /* Threads that have been woken at least once don't care |
121 | | about waiting writers or long waiters. */ |
122 | 1.00k | zero_to_acquire &= ~(MU_WRITER_WAITING | MU_LONG_WAIT); |
123 | 1.00k | } |
124 | 1.20k | attempts = nsync_spin_delay_ (attempts); |
125 | 1.20k | } |
126 | 1.18k | } |
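
/* Reduced sketch of the slow-path control flow above, under stated
   assumptions: the waiter queue and semaphore are elided, so the wait is
   a bare retry loop rather than a block on w->sem. It shows only the
   core step: an acquire-CAS that succeeds once the lock word's
   "must be zero" bits are all clear. */
#include <stdatomic.h>
#include <stdint.h>

static void sketch_lock_slow (_Atomic uint32_t *word,
                              uint32_t zero_to_acquire,
                              uint32_t add_to_acquire) {
        for (;;) {
                uint32_t old_word = atomic_load (word);
                if ((old_word & zero_to_acquire) == 0 &&
                    atomic_compare_exchange_weak_explicit (
                                word, &old_word,
                                old_word + add_to_acquire,
                                memory_order_acquire,
                                memory_order_relaxed)) {
                        return;   /* acquired */
                }
                /* nsync would enqueue on mu->waiters and sleep here. */
        }
}
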
127 | | |
128 | | /* Attempt to acquire *mu in writer mode without blocking, and return non-zero |
129 | | iff successful. Return non-zero with high probability if *mu was free on |
130 | | entry. */ |
131 | 0 | int nsync_mu_trylock (nsync_mu *mu) { |
132 | 0 | int result; |
133 | 0 | IGNORE_RACES_START (); |
134 | 0 | if (ATM_CAS_ACQ (&mu->word, 0, MU_WADD_TO_ACQUIRE)) { /* acquire CAS */ |
135 | 0 | result = 1; |
136 | 0 | } else { |
137 | 0 | uint32_t old_word = ATM_LOAD (&mu->word); |
138 | 0 | result = ((old_word & MU_WZERO_TO_ACQUIRE) == 0 && |
139 | 0 | ATM_CAS_ACQ (&mu->word, old_word, |
140 | 0 | (old_word + MU_WADD_TO_ACQUIRE) & ~MU_WCLEAR_ON_ACQUIRE)); |
141 | 0 | } |
142 | 0 | IGNORE_RACES_END (); |
143 | 0 | RWLOCK_TRYACQUIRE (result, mu, 1); |
144 | 0 | return (result); |
145 | 0 | } |
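
/* Usage sketch, not part of this file: nsync_mu_trylock() never blocks,
   so it suits opportunistic work. The function below is hypothetical. */
#include "nsync_mu.h"

static int try_update (nsync_mu *mu, int *value) {
        if (!nsync_mu_trylock (mu)) {
                return (0);       /* lock busy; caller can retry later */
        }
        (*value)++;
        nsync_mu_unlock (mu);
        return (1);
}
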
146 | | |
147 | | /* Block until *mu is free and then acquire it in writer mode. */ |
148 | 2.77M | void nsync_mu_lock (nsync_mu *mu) { |
149 | 2.77M | IGNORE_RACES_START (); |
150 | 2.77M | if (!ATM_CAS_ACQ (&mu->word, 0, MU_WADD_TO_ACQUIRE)) { /* acquire CAS */ |
151 | 1.67k | uint32_t old_word = ATM_LOAD (&mu->word); |
152 | 1.67k | if ((old_word&MU_WZERO_TO_ACQUIRE) != 0 || |
153 | 1.67k | !ATM_CAS_ACQ (&mu->word, old_word, |
154 | 1.67k | (old_word+MU_WADD_TO_ACQUIRE) & ~MU_WCLEAR_ON_ACQUIRE)) { |
155 | 1.17k | waiter *w = nsync_waiter_new_ (); |
156 | 1.17k | nsync_mu_lock_slow_ (mu, w, 0, nsync_writer_type_); |
157 | 1.17k | nsync_waiter_free_ (w); |
158 | 1.17k | } |
159 | 1.67k | } |
160 | 2.77M | IGNORE_RACES_END (); |
161 | 2.77M | RWLOCK_TRYACQUIRE (1, mu, 1); |
162 | 2.77M | } |
163 | | |
164 | | /* Attempt to acquire *mu in reader mode without blocking, and return non-zero |
165 | | iff successful. Returns non-zero with high probability if *mu was free on |
166 | | entry. It may fail to acquire if a writer is waiting, to avoid starvation. |
167 | | */ |
168 | 0 | int nsync_mu_rtrylock (nsync_mu *mu) { |
169 | 0 | int result; |
170 | 0 | IGNORE_RACES_START (); |
171 | 0 | if (ATM_CAS_ACQ (&mu->word, 0, MU_RADD_TO_ACQUIRE)) { /* acquire CAS */ |
172 | 0 | result = 1; |
173 | 0 | } else { |
174 | 0 | uint32_t old_word = ATM_LOAD (&mu->word); |
175 | 0 | result = ((old_word&MU_RZERO_TO_ACQUIRE) == 0 && |
176 | 0 | ATM_CAS_ACQ (&mu->word, old_word, |
177 | 0 | (old_word+MU_RADD_TO_ACQUIRE) & ~MU_RCLEAR_ON_ACQUIRE)); |
178 | 0 | } |
179 | 0 | IGNORE_RACES_END (); |
180 | 0 | RWLOCK_TRYACQUIRE (result, mu, 0); |
181 | 0 | return (result); |
182 | 0 | } |
183 | | |
184 | | /* Block until *mu can be acquired in reader mode and then acquire it. */ |
185 | 5.03M | void nsync_mu_rlock (nsync_mu *mu) { |
186 | 5.03M | IGNORE_RACES_START (); |
187 | 5.03M | if (!ATM_CAS_ACQ (&mu->word, 0, MU_RADD_TO_ACQUIRE)) { /* acquire CAS */ |
188 | 0 | uint32_t old_word = ATM_LOAD (&mu->word); |
189 | 0 | if ((old_word&MU_RZERO_TO_ACQUIRE) != 0 || |
190 | 0 | !ATM_CAS_ACQ (&mu->word, old_word, |
191 | 0 | (old_word+MU_RADD_TO_ACQUIRE) & ~MU_RCLEAR_ON_ACQUIRE)) { |
192 | 0 | waiter *w = nsync_waiter_new_ (); |
193 | 0 | nsync_mu_lock_slow_ (mu, w, 0, nsync_reader_type_); |
194 | 0 | nsync_waiter_free_ (w); |
195 | 0 | } |
196 | 0 | } |
197 | 5.03M | IGNORE_RACES_END (); |
198 | 5.03M | RWLOCK_TRYACQUIRE (1, mu, 0); |
199 | 5.03M | } |
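
/* Usage sketch, not part of this file: any number of threads may hold
   *mu in read mode at once; nsync_mu_rlock() pairs with
   nsync_mu_runlock(). The function below is hypothetical. */
#include "nsync_mu.h"

static int read_value (nsync_mu *mu, const int *value) {
        int v;
        nsync_mu_rlock (mu);      /* shared (reader) mode */
        v = *value;
        nsync_mu_runlock (mu);
        return (v);
}
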
200 | | |
201 | | /* Invoke the condition associated with *p, which is an element of |
202 | | a "waiter" list. */ |
203 | 0 | static int condition_true (nsync_dll_element_ *p) { |
204 | 0 | return ((*DLL_WAITER (p)->cond.f) (DLL_WAITER (p)->cond.v)); |
205 | 0 | } |
206 | | |
207 | | /* If *p is an element of waiter_list (a list of "waiter" structs), return a |
208 | | pointer to the next element of the list that has a different condition. */ |
209 | | static nsync_dll_element_ *skip_past_same_condition ( |
210 | 0 | nsync_dll_list_ waiter_list, nsync_dll_element_ *p) { |
211 | 0 | nsync_dll_element_ *next; |
212 | 0 | nsync_dll_element_ *last_with_same_condition = |
213 | 0 | &DLL_WAITER_SAMECOND (DLL_WAITER (p)->same_condition.prev)->nw.q; |
214 | 0 | if (last_with_same_condition != p && last_with_same_condition != p->prev) { |
215 | | /* First in set with same condition, so skip to end. */ |
216 | 0 | next = nsync_dll_next_ (waiter_list, last_with_same_condition); |
217 | 0 | } else { |
218 | 0 | next = nsync_dll_next_ (waiter_list, p); |
219 | 0 | } |
220 | 0 | return (next); |
221 | 0 | } |
222 | | |
223 | | /* Merge the same_condition lists of *p and *n if they have the same non-NULL |
224 | | condition. */ |
225 | 1.00k | void nsync_maybe_merge_conditions_ (nsync_dll_element_ *p, nsync_dll_element_ *n) { |
226 | 1.00k | if (p != NULL && n != NULL && |
227 | 1.00k | WAIT_CONDITION_EQ (&DLL_WAITER (p)->cond, &DLL_WAITER (n)->cond)) { |
228 | 0 | nsync_dll_splice_after_ (&DLL_WAITER (p)->same_condition, |
229 | 0 | &DLL_WAITER (n)->same_condition); |
230 | 0 | } |
231 | 1.00k | } |
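
/* Self-contained sketch of the ring splice that nsync_dll_splice_after_()
   performs on the circular same_condition lists; the struct and function
   names are hypothetical. Given nodes p and n on two separate circular
   doubly-linked rings, the two rings are joined into one, with n's ring
   following p. */
struct ring { struct ring *next; struct ring *prev; };

static void ring_splice_after (struct ring *p, struct ring *n) {
        struct ring *p_next = p->next;    /* old successor of p */
        struct ring *n_prev = n->prev;    /* old predecessor of n */
        p->next = n;                      /* p now leads into n's ring */
        n->prev = p;
        n_prev->next = p_next;            /* n's ring leads back into p's */
        p_next->prev = n_prev;
}
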
232 | | |
233 | | /* Remove element *e from nsync_mu waiter queue mu_queue, fixing |
234 | | up the same_condition list by merging the lists on either side if possible. |
235 | | Also increment the waiter's remove_count. */ |
236 | 1.00k | nsync_dll_list_ nsync_remove_from_mu_queue_ (nsync_dll_list_ mu_queue, nsync_dll_element_ *e) { |
237 | | /* Record previous and next elements in the original queue. */ |
238 | 1.00k | nsync_dll_element_ *prev = e->prev; |
239 | 1.00k | nsync_dll_element_ *next = e->next; |
240 | 1.00k | uint32_t old_value; |
241 | | /* Remove. */ |
242 | 1.00k | mu_queue = nsync_dll_remove_ (mu_queue, e); |
243 | 1.00k | do { |
244 | 1.00k | old_value = ATM_LOAD (&DLL_WAITER (e)->remove_count); |
245 | 1.00k | } while (!ATM_CAS (&DLL_WAITER (e)->remove_count, old_value, old_value+1)); |
246 | 1.00k | if (!nsync_dll_is_empty_ (mu_queue)) { |
247 | | /* Fix up same_condition. */ |
248 | 702 | nsync_dll_element_ *e_same_condition = &DLL_WAITER (e)->same_condition; |
249 | | |
250 | 702 | if (e_same_condition->next != e_same_condition) { |
251 | | /* *e is linked to a same_condition neighbour---just remove it. */ |
252 | 0 | e_same_condition->next->prev = e_same_condition->prev; |
253 | 0 | e_same_condition->prev->next = e_same_condition->next; |
254 | 0 | e_same_condition->next = e_same_condition; |
255 | 0 | e_same_condition->prev = e_same_condition; |
256 | 702 | } else if (prev != nsync_dll_last_ (mu_queue)) { |
257 | | /* Merge the new neighbours together if we can. */ |
258 | 0 | nsync_maybe_merge_conditions_ (prev, next); |
259 | 0 | } |
260 | 702 | } |
261 | 1.00k | return (mu_queue); |
262 | 1.00k | } |
263 | | |
264 | | /* Unlock *mu and wake one or more waiters as appropriate after an unlock. |
265 | | It is called with *mu held in mode l_type. */ |
266 | 1.00k | void nsync_mu_unlock_slow_ (nsync_mu *mu, lock_type *l_type) { |
267 | 1.00k | unsigned attempts = 0; /* attempt count; used for backoff */ |
268 | 1.07k | for (;;) { |
269 | 1.07k | uint32_t old_word = ATM_LOAD (&mu->word); |
270 | 1.07k | int testing_conditions = ((old_word & MU_CONDITION) != 0); |
271 | 1.07k | uint32_t early_release_mu = l_type->add_to_acquire; |
272 | 1.07k | uint32_t late_release_mu = 0; |
273 | 1.07k | if (testing_conditions) { |
274 | | /* Convert to a writer lock, and release later. |
275 | | - A writer lock is currently needed to test conditions |
276 | | because exclusive access is needed to the list to |
277 | | allow modification. The spinlock cannot be used |
278 | | to achieve that, because an internal lock should not |
279 | | be held when calling the external predicates. |
280 | | - We must test conditions even though a reader region |
281 | | cannot have made any new ones true because some |
282 | | might have been true before the reader region started. |
283 | | The MU_ALL_FALSE test below shortcuts the case where |
284 | | the conditions are known all to be false. */ |
285 | 0 | early_release_mu = l_type->add_to_acquire - MU_WLOCK; |
286 | 0 | late_release_mu = MU_WLOCK; |
287 | 0 | } |
288 | 1.07k | if ((old_word&MU_WAITING) == 0 || (old_word&MU_DESIG_WAKER) != 0 || |
289 | 1.07k | (old_word & MU_RLOCK_FIELD) > MU_RLOCK || |
290 | 1.07k | (old_word & (MU_RLOCK|MU_ALL_FALSE)) == (MU_RLOCK|MU_ALL_FALSE)) { |
291 | | /* no one to wake, there's a designated waker waking |
292 | | up, there are still readers, or it's a reader and all waiters |
293 | | have false conditions */ |
294 | 2 | if (ATM_CAS_REL (&mu->word, old_word, |
295 | 2 | (old_word - l_type->add_to_acquire) & |
296 | 2 | ~l_type->clear_on_uncontended_release)) { |
297 | 2 | return; |
298 | 2 | } |
299 | 1.07k | } else if ((old_word&MU_SPINLOCK) == 0 && |
300 | 1.07k | ATM_CAS_RELACQ (&mu->word, old_word, |
301 | 1.07k | (old_word-early_release_mu)|MU_SPINLOCK|MU_DESIG_WAKER)) { |
302 | 1.00k | nsync_dll_list_ wake; |
303 | 1.00k | lock_type *wake_type; |
304 | 1.00k | uint32_t clear_on_release; |
305 | 1.00k | uint32_t set_on_release; |
306 | | /* The spinlock is now held, and we've set the |
307 | | designated waker flag, since we're likely to wake a |
308 | | thread that will become that designated waker. If |
309 | | there are conditions to check, the mutex itself is |
310 | | still held. */ |
311 | | |
312 | 1.00k | nsync_dll_element_ *p = NULL; |
313 | 1.00k | nsync_dll_element_ *next = NULL; |
314 | | |
315 | | /* Swap the entire mu->waiters list into the local |
316 | | "new_waiters" list. This gives us exclusive access |
317 | | to the list, even if we unlock the spinlock, which |
318 | | we may do if checking conditions. The loop below |
319 | | will grab more new waiters that arrived while we |
320 | | were checking conditions, and terminates only if no |
321 | | new waiters arrive in one loop iteration. */ |
322 | 1.00k | nsync_dll_list_ waiters = NULL; |
323 | 1.00k | nsync_dll_list_ new_waiters = mu->waiters; |
324 | 1.00k | mu->waiters = NULL; |
325 | | |
326 | | /* Remove a waiter from the queue, if possible. */ |
327 | 1.00k | wake = NULL; /* waiters to wake. */ |
328 | 1.00k | wake_type = NULL; /* type of waiter(s) on wake, or NULL if wake is empty. */ |
329 | 1.00k | clear_on_release = MU_SPINLOCK; |
330 | 1.00k | set_on_release = MU_ALL_FALSE; |
331 | 2.00k | while (!nsync_dll_is_empty_ (new_waiters)) { /* some new waiters to consider */ |
332 | 1.00k | p = nsync_dll_first_ (new_waiters); |
333 | 1.00k | if (testing_conditions) { |
334 | | /* Should we continue to test conditions? */ |
335 | 0 | if (wake_type == nsync_writer_type_) { |
336 | | /* No, because we're already waking a writer, |
337 | | and need wake no others.*/ |
338 | 0 | testing_conditions = 0; |
339 | 0 | } else if (wake_type == NULL && |
340 | 0 | DLL_WAITER (p)->l_type != nsync_reader_type_ && |
341 | 0 | DLL_WAITER (p)->cond.f == NULL) { |
342 | | /* No, because we've woken no one, but the |
343 | | first waiter is a writer with no condition, |
344 | | so we will certainly wake it, and need wake |
345 | | no others. */ |
346 | 0 | testing_conditions = 0; |
347 | 0 | } |
348 | 0 | } |
349 | | /* If testing waiters' conditions, release the |
350 | | spinlock while still holding the write lock. |
351 | | This is so that the spinlock is not held |
352 | | while the conditions are evaluated. */ |
353 | 1.00k | if (testing_conditions) { |
354 | 0 | mu_release_spinlock (mu); |
355 | 0 | } |
356 | | |
357 | | /* Process the new waiters picked up in this iteration of the |
358 | | "while (!nsync_dll_is_empty_ (new_waiters))" loop, |
359 | | and stop looking when we run out of waiters, or we find |
360 | | a writer to wake up. */ |
361 | 2.00k | while (p != NULL && wake_type != nsync_writer_type_) { |
362 | 1.00k | int p_has_condition; |
363 | 1.00k | next = nsync_dll_next_ (new_waiters, p); |
364 | 1.00k | p_has_condition = (DLL_WAITER (p)->cond.f != NULL); |
365 | 1.00k | if (p_has_condition && !testing_conditions) { |
366 | 0 | nsync_panic_ ("checking a waiter condition " |
367 | 0 | "while unlocked\n"); |
368 | 0 | } |
369 | 1.00k | if (p_has_condition && !condition_true (p)) { |
370 | | /* condition is false */ |
371 | | /* skip to the end of the same_condition group. */ |
372 | 0 | next = skip_past_same_condition (new_waiters, p); |
373 | 1.00k | } else if (wake_type == NULL || |
374 | 1.00k | DLL_WAITER (p)->l_type == nsync_reader_type_) { |
375 | | /* Wake this thread. */ |
376 | 1.00k | new_waiters = nsync_remove_from_mu_queue_ ( |
377 | 1.00k | new_waiters, p); |
378 | 1.00k | wake = nsync_dll_make_last_in_list_ (wake, p); |
379 | 1.00k | wake_type = DLL_WAITER (p)->l_type; |
380 | 1.00k | } else { |
381 | | /* Failing to wake a writer |
382 | | that could acquire if it |
383 | | were first. */ |
384 | 0 | set_on_release |= MU_WRITER_WAITING; |
385 | 0 | set_on_release &= ~MU_ALL_FALSE; |
386 | 0 | } |
387 | 1.00k | p = next; |
388 | 1.00k | } |
389 | | |
390 | 1.00k | if (p != NULL) { |
391 | | /* Didn't search to end of list, so can't be sure |
392 | | all conditions are false. */ |
393 | 702 | set_on_release &= ~MU_ALL_FALSE; |
394 | 702 | } |
395 | | |
396 | | /* If testing waiters' conditions, reacquire the spinlock |
397 | | released above. */ |
398 | 1.00k | if (testing_conditions) { |
399 | 0 | nsync_spin_test_and_set_ (&mu->word, MU_SPINLOCK, |
400 | 0 | MU_SPINLOCK, 0); |
401 | 0 | } |
402 | | |
403 | | /* Append the new_waiters to the end of the waiters list. */ |
404 | 1.00k | nsync_maybe_merge_conditions_ (nsync_dll_last_ (waiters), |
405 | 1.00k | nsync_dll_first_ (new_waiters)); |
406 | 1.00k | waiters = nsync_dll_make_last_in_list_ (waiters, |
407 | 1.00k | nsync_dll_last_ (new_waiters)); |
408 | | /* Pick up the next set of new waiters. */ |
409 | 1.00k | new_waiters = mu->waiters; |
410 | 1.00k | mu->waiters = NULL; |
411 | 1.00k | } |
412 | | |
413 | | /* Return the local waiter list to *mu. */ |
414 | 1.00k | mu->waiters = waiters; |
415 | | |
416 | 1.00k | if (nsync_dll_is_empty_ (wake)) { |
417 | | /* not waking a waiter => no designated waker */ |
418 | 0 | clear_on_release |= MU_DESIG_WAKER; |
419 | 0 | } |
420 | | |
421 | 1.00k | if ((set_on_release & MU_ALL_FALSE) == 0) { |
422 | | /* If not explicitly setting MU_ALL_FALSE, clear it. */ |
423 | 702 | clear_on_release |= MU_ALL_FALSE; |
424 | 702 | } |
425 | | |
426 | 1.00k | if (nsync_dll_is_empty_ (mu->waiters)) { |
427 | | /* no waiters left */ |
428 | 301 | clear_on_release |= MU_WAITING | MU_WRITER_WAITING | |
429 | 301 | MU_CONDITION | MU_ALL_FALSE; |
430 | 301 | } |
431 | | |
432 | | /* Release the spinlock, and possibly the lock if |
433 | | late_release_mu is non-zero. Other bits are set or |
434 | | cleared according to whether we woke any threads, |
435 | | whether any waiters remain, and whether any of them |
436 | | are writers. */ |
437 | 1.00k | old_word = ATM_LOAD (&mu->word); |
438 | 1.00k | while (!ATM_CAS_REL (&mu->word, old_word, |
439 | 1.00k | ((old_word-late_release_mu)|set_on_release) & |
440 | 1.00k | ~clear_on_release)) { /* release CAS */ |
441 | 1 | old_word = ATM_LOAD (&mu->word); |
442 | 1 | } |
443 | | /* Wake the waiters. */ |
444 | 2.00k | for (p = nsync_dll_first_ (wake); p != NULL; p = next) { |
445 | 1.00k | next = nsync_dll_next_ (wake, p); |
446 | 1.00k | wake = nsync_dll_remove_ (wake, p); |
447 | 1.00k | ATM_STORE_REL (&DLL_NSYNC_WAITER (p)->waiting, 0); |
448 | 1.00k | nsync_mu_semaphore_v (&DLL_WAITER (p)->sem); |
449 | 1.00k | } |
450 | 1.00k | return; |
451 | 1.00k | } |
452 | 67 | attempts = nsync_spin_delay_ (attempts); |
453 | 67 | } |
454 | 1.00k | } |
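
/* Reduced sketch of the wake step above, with hypothetical names: the
   woken threads' semaphores are posted only after the lock word has been
   released, and each waiter's "waiting" flag is cleared with a release
   store that pairs with the acquire load in nsync_mu_lock_slow_(). */
#include <stdatomic.h>
#include <stddef.h>

struct sketch_waiter {
        struct sketch_waiter *next;       /* singly linked wake list */
        _Atomic int waiting;
};

static void sketch_wake_list (struct sketch_waiter *wake) {
        while (wake != NULL) {
                struct sketch_waiter *next = wake->next;
                atomic_store_explicit (&wake->waiting, 0,
                                       memory_order_release);
                /* ...then post the waiter's semaphore, as
                   nsync_mu_semaphore_v() does above... */
                wake = next;
        }
}
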
455 | | |
456 | | /* Unlock *mu, which must be held in write mode, and wake waiters, if appropriate. */ |
457 | 2.77M | void nsync_mu_unlock (nsync_mu *mu) { |
458 | 2.77M | RWLOCK_RELEASE (mu, 1); |
459 | 2.77M | IGNORE_RACES_START (); |
460 | | /* C is not a garbage-collected language, so we cannot release until we |
461 | | can be sure that we will not have to touch the mutex again to wake a |
462 | | waiter. Another thread could acquire, decrement a reference count |
463 | | and deallocate the mutex before the current thread touched the mutex |
464 | | word again. */ |
465 | 2.77M | if (!ATM_CAS_REL (&mu->word, MU_WLOCK, 0)) { |
466 | 1.45k | uint32_t old_word = ATM_LOAD (&mu->word); |
467 | | /* Clear MU_ALL_FALSE because the critical section we're just |
468 | | leaving may have made some conditions true. */ |
469 | 1.45k | uint32_t new_word = (old_word - MU_WLOCK) & ~MU_ALL_FALSE; |
470 | | /* Sanity check: mutex must be held in write mode, and there |
471 | | must be no readers. */ |
472 | 1.45k | if ((new_word & (MU_RLOCK_FIELD | MU_WLOCK)) != 0) { |
473 | 0 | if ((old_word & MU_RLOCK_FIELD) != 0) { |
474 | 0 | nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu " |
475 | 0 | "held in read mode\n"); |
476 | 0 | } else { |
477 | 0 | nsync_panic_ ("attempt to nsync_mu_unlock() an nsync_mu " |
478 | 0 | "not held in write mode\n"); |
479 | 0 | } |
480 | 1.45k | } else if ((old_word & (MU_WAITING|MU_DESIG_WAKER)) == MU_WAITING || |
481 | 1.45k | !ATM_CAS_REL (&mu->word, old_word, new_word)) { |
482 | | /* There are waiters and no designated waker, or |
483 | | our initial CAS attempt failed, so take the slow path. */ |
484 | 1.00k | nsync_mu_unlock_slow_ (mu, nsync_writer_type_); |
485 | 1.00k | } |
486 | 1.45k | } |
487 | 2.77M | IGNORE_RACES_END (); |
488 | 2.77M | } |
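
/* Sketch of the lifetime hazard that the comment in nsync_mu_unlock()
   guards against, with hypothetical names: once the releasing CAS makes
   *mu free, another thread may acquire it, drop the last reference, and
   free the enclosing object, so the unlocker must not touch mu->word
   again after its final release. */
#include <stdlib.h>
#include "nsync_mu.h"

struct shared { nsync_mu mu; int refs; };

static void shared_unref (struct shared *s) {
        int last;
        nsync_mu_lock (&s->mu);
        last = (--s->refs == 0);
        nsync_mu_unlock (&s->mu);         /* last touch of s->mu */
        if (last) {
                free (s);                 /* safe: s->mu is quiescent */
        }
}
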
489 | | |
490 | | /* Unlock *mu, which must be held in read mode, and wake waiters, if appropriate. */ |
491 | 5.03M | void nsync_mu_runlock (nsync_mu *mu) { |
492 | 5.03M | RWLOCK_RELEASE (mu, 0); |
493 | 5.03M | IGNORE_RACES_START (); |
494 | | /* See comment in nsync_mu_unlock(). */ |
495 | 5.03M | if (!ATM_CAS_REL (&mu->word, MU_RLOCK, 0)) { |
496 | 0 | uint32_t old_word = ATM_LOAD (&mu->word); |
497 | | /* Sanity check: mutex must not be held in write mode and |
498 | | reader count must not be 0. */ |
499 | 0 | if (((old_word ^ MU_WLOCK) & (MU_WLOCK | MU_RLOCK_FIELD)) == 0) { |
500 | 0 | if ((old_word & MU_WLOCK) != 0) { |
501 | 0 | nsync_panic_ ("attempt to nsync_mu_runlock() an nsync_mu " |
502 | 0 | "held in write mode\n"); |
503 | 0 | } else { |
504 | 0 | nsync_panic_ ("attempt to nsync_mu_runlock() an nsync_mu " |
505 | 0 | "not held in read mode\n"); |
506 | 0 | } |
507 | 0 | } else if ((old_word & (MU_WAITING | MU_DESIG_WAKER)) == MU_WAITING && |
508 | 0 | (old_word & (MU_RLOCK_FIELD|MU_ALL_FALSE)) == MU_RLOCK) { |
509 | | /* There are waiters and no designated waker, the last |
510 | | reader is unlocking, and not all waiters have a |
511 | | false condition. So we must take the slow path to |
512 | | attempt to wake a waiter. */ |
513 | 0 | nsync_mu_unlock_slow_ (mu, nsync_reader_type_); |
514 | 0 | } else if (!ATM_CAS_REL (&mu->word, old_word, old_word - MU_RLOCK)) { |
515 | | /* CAS attempt failed, so take slow path. */ |
516 | 0 | nsync_mu_unlock_slow_ (mu, nsync_reader_type_); |
517 | 0 | } |
518 | 0 | } |
519 | 5.03M | IGNORE_RACES_END (); |
520 | 5.03M | } |
521 | | |
522 | | /* Abort if *mu is not held in write mode. */ |
523 | 0 | void nsync_mu_assert_held (const nsync_mu *mu) { |
524 | 0 | IGNORE_RACES_START (); |
525 | 0 | if ((ATM_LOAD (&mu->word) & MU_WHELD_IF_NON_ZERO) == 0) { |
526 | 0 | nsync_panic_ ("nsync_mu not held in write mode\n"); |
527 | 0 | } |
528 | 0 | IGNORE_RACES_END (); |
529 | 0 | } |
530 | | |
531 | | /* Abort if *mu is not held in read or write mode. */ |
532 | 0 | void nsync_mu_rassert_held (const nsync_mu *mu) { |
533 | 0 | IGNORE_RACES_START (); |
534 | 0 | if ((ATM_LOAD (&mu->word) & MU_ANY_LOCK) == 0) { |
535 | 0 | nsync_panic_ ("nsync_mu not held in some mode\n"); |
536 | 0 | } |
537 | 0 | IGNORE_RACES_END (); |
538 | 0 | } |
539 | | |
540 | | /* Return whether *mu is held in read mode. |
541 | | Requires that *mu is held in some mode. */ |
542 | 0 | int nsync_mu_is_reader (const nsync_mu *mu) { |
543 | 0 | uint32_t word; |
544 | 0 | IGNORE_RACES_START (); |
545 | 0 | word = ATM_LOAD (&mu->word); |
546 | 0 | if ((word & MU_ANY_LOCK) == 0) { |
547 | 0 | nsync_panic_ ("nsync_mu not held in some mode\n"); |
548 | 0 | } |
549 | 0 | IGNORE_RACES_END (); |
550 | 0 | return ((word & MU_WLOCK) == 0); |
551 | 0 | } |
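
/* Usage sketch, not part of this file: the assertion calls document and
   check lock invariants in helpers that require a caller-held lock. The
   names below are hypothetical. */
#include "nsync_mu.h"

static int table_size;                    /* guarded by a table's mutex */

static int table_size_locked (nsync_mu *table_mu) {
        nsync_mu_rassert_held (table_mu); /* either mode suffices to read */
        return (table_size);
}
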
552 | | |
553 | | NSYNC_CPP_END_ |