/src/strongswan/src/libstrongswan/threading/rwlock.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2008-2012 Tobias Brunner |
3 | | * Copyright (C) 2008 Martin Willi |
4 | | * |
5 | | * Copyright (C) secunet Security Networks AG |
6 | | * |
7 | | * This program is free software; you can redistribute it and/or modify it |
8 | | * under the terms of the GNU General Public License as published by the |
9 | | * Free Software Foundation; either version 2 of the License, or (at your |
10 | | * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. |
11 | | * |
12 | | * This program is distributed in the hope that it will be useful, but |
13 | | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
14 | | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | | * for more details. |
16 | | */ |
17 | | |
18 | | #define _GNU_SOURCE |
19 | | #include <pthread.h> |
20 | | |
21 | | #include <library.h> |
22 | | #include <utils/debug.h> |
23 | | |
24 | | #include "rwlock.h" |
25 | | #include "rwlock_condvar.h" |
26 | | #include "thread.h" |
27 | | #include "condvar.h" |
28 | | #include "mutex.h" |
29 | | #include "lock_profiler.h" |
30 | | |
31 | | #ifdef __APPLE__ |
32 | | /* while pthread_rwlock_rdlock(3) says that it supports multiple read locks, |
33 | | * this does not seem to be true. After releasing a recursive rdlock, |
34 | | * a subsequent wrlock fails... */ |
35 | | # undef HAVE_PTHREAD_RWLOCK_INIT |
36 | | #endif |
37 | | |
38 | | typedef struct private_rwlock_t private_rwlock_t; |
39 | | typedef struct private_rwlock_condvar_t private_rwlock_condvar_t; |
40 | | |
41 | | /** |
42 | | * private data of rwlock |
43 | | */ |
struct private_rwlock_t {

	/**
	 * public functions
	 */
	rwlock_t public;

#ifdef HAVE_PTHREAD_RWLOCK_INIT

	/**
	 * wrapped pthread rwlock
	 */
	pthread_rwlock_t rwlock;

#else

	/**
	 * mutex to emulate a native rwlock
	 */
	mutex_t *mutex;

	/**
	 * condvar to handle writers
	 */
	condvar_t *writers;

	/**
	 * condvar to handle readers
	 */
	condvar_t *readers;

	/**
	 * number of writers currently blocked waiting for the lock
	 */
	u_int waiting_writers;

	/**
	 * number of readers holding the lock
	 */
	u_int reader_count;

	/**
	 * TRUE, if a writer is holding the lock currently
	 */
	bool writer;

#endif /* HAVE_PTHREAD_RWLOCK_INIT */

	/**
	 * profiling info, if enabled
	 */
	lock_profile_t profile;
};
97 | | |
98 | | /** |
99 | | * private data of condvar |
100 | | */ |
struct private_rwlock_condvar_t {

	/**
	 * public interface
	 */
	rwlock_condvar_t public;

	/**
	 * mutex used to implement rwlock condvar (guards the wrapped condvar so
	 * waiters and signalers are serialized independently of the rwlock)
	 */
	mutex_t *mutex;

	/**
	 * regular condvar to implement rwlock condvar
	 */
	condvar_t *condvar;
};
118 | | |
119 | | |
120 | | #ifdef HAVE_PTHREAD_RWLOCK_INIT |
121 | | |
122 | | METHOD(rwlock_t, read_lock, void, |
123 | | private_rwlock_t *this) |
124 | 119k | { |
125 | 119k | int err; |
126 | | |
127 | 119k | profiler_start(&this->profile); |
128 | 119k | err = pthread_rwlock_rdlock(&this->rwlock); |
129 | 119k | if (err != 0) |
130 | 0 | { |
131 | 0 | DBG1(DBG_LIB, "!!! RWLOCK READ LOCK ERROR: %s !!!", strerror(err)); |
132 | 0 | } |
133 | 119k | profiler_end(&this->profile); |
134 | 119k | } |
135 | | |
136 | | METHOD(rwlock_t, write_lock, void, |
137 | | private_rwlock_t *this) |
138 | 288k | { |
139 | 288k | int err; |
140 | | |
141 | 288k | profiler_start(&this->profile); |
142 | 288k | err = pthread_rwlock_wrlock(&this->rwlock); |
143 | 288k | if (err != 0) |
144 | 0 | { |
145 | 0 | DBG1(DBG_LIB, "!!! RWLOCK WRITE LOCK ERROR: %s !!!", strerror(err)); |
146 | 0 | } |
147 | 288k | profiler_end(&this->profile); |
148 | 288k | } |
149 | | |
150 | | METHOD(rwlock_t, try_write_lock, bool, |
151 | | private_rwlock_t *this) |
152 | 0 | { |
153 | 0 | return pthread_rwlock_trywrlock(&this->rwlock) == 0; |
154 | 0 | } |
155 | | |
156 | | METHOD(rwlock_t, unlock, void, |
157 | | private_rwlock_t *this) |
158 | 408k | { |
159 | 408k | int err; |
160 | | |
161 | 408k | err = pthread_rwlock_unlock(&this->rwlock); |
162 | 408k | if (err != 0) |
163 | 0 | { |
164 | 0 | DBG1(DBG_LIB, "!!! RWLOCK UNLOCK ERROR: %s !!!", strerror(err)); |
165 | 0 | } |
166 | 408k | } |
167 | | |
168 | | METHOD(rwlock_t, destroy, void, |
169 | | private_rwlock_t *this) |
170 | 144k | { |
171 | 144k | pthread_rwlock_destroy(&this->rwlock); |
172 | 144k | profiler_cleanup(&this->profile); |
173 | 144k | free(this); |
174 | 144k | } |
175 | | |
176 | | /* |
177 | | * see header file |
178 | | */ |
179 | | rwlock_t *rwlock_create(rwlock_type_t type) |
180 | 144k | { |
181 | 144k | switch (type) |
182 | 144k | { |
183 | 144k | case RWLOCK_TYPE_DEFAULT: |
184 | 144k | default: |
185 | 144k | { |
186 | 144k | private_rwlock_t *this; |
187 | | |
188 | 144k | INIT(this, |
189 | 144k | .public = { |
190 | 144k | .read_lock = _read_lock, |
191 | 144k | .write_lock = _write_lock, |
192 | 144k | .try_write_lock = _try_write_lock, |
193 | 144k | .unlock = _unlock, |
194 | 144k | .destroy = _destroy, |
195 | 144k | } |
196 | 144k | ); |
197 | | |
198 | 144k | pthread_rwlock_init(&this->rwlock, NULL); |
199 | 144k | profiler_init(&this->profile); |
200 | | |
201 | 144k | return &this->public; |
202 | 144k | } |
203 | 144k | } |
204 | 144k | } |
205 | | |
206 | | #else /* HAVE_PTHREAD_RWLOCK_INIT */ |
207 | | |
208 | | /** |
209 | | * This implementation of the rwlock_t interface uses mutex_t and condvar_t |
210 | | * primitives, if the pthread_rwlock_* group of functions is not available or |
211 | | * don't allow recursive locking for readers. |
212 | | * |
213 | | * The following constraints are enforced: |
214 | | * - Multiple readers can hold the lock at the same time. |
215 | | * - Only a single writer can hold the lock at any given time. |
216 | | * - A writer must block until all readers have released the lock before |
217 | | * obtaining the lock exclusively. |
218 | | * - Readers that don't hold any read lock and arrive while a writer is |
219 | | * waiting to acquire the lock will block until after the writer has |
220 | | * obtained and released the lock. |
221 | | * These constraints allow for read sharing, prevent write sharing, prevent |
222 | | * read-write sharing and (largely) prevent starvation of writers by a steady |
223 | | * stream of incoming readers. Reader starvation is not prevented (this could |
224 | | * happen if there are more writers than readers). |
225 | | * |
226 | | * The implementation supports recursive locking of the read lock but not of |
227 | | * the write lock. Readers must not acquire the lock exclusively at the same |
228 | | * time and vice-versa (this is not checked or enforced so behave yourself to |
229 | | * prevent deadlocks). |
230 | | * |
231 | | * Since writers are preferred a thread currently holding the read lock that |
232 | | * tries to acquire the read lock recursively while a writer is waiting would |
233 | | * result in a deadlock. In order to avoid having to use a thread-specific |
234 | | * value for each rwlock_t (or a list of threads) to keep track if a thread |
235 | | * already acquired the read lock we use a single thread-specific value for all |
236 | | * rwlock_t objects that keeps track of how many read locks a thread currently |
237 | | * holds. Preferring readers that already hold ANY read locks prevents this |
238 | | * deadlock while it still largely avoids writer starvation (for locks that can |
239 | | * only be acquired while holding another read lock this will obviously not |
240 | | * work). |
241 | | */ |
242 | | |
243 | | /** |
244 | | * Keep track of how many read locks a thread holds. |
245 | | */ |
246 | | static pthread_key_t is_reader; |
247 | | |
248 | | /** |
249 | | * Only initialize the read lock counter once. |
250 | | */ |
251 | | static pthread_once_t is_reader_initialized = PTHREAD_ONCE_INIT; |
252 | | |
253 | | /** |
254 | | * Initialize the read lock counter. |
255 | | */ |
256 | | static void initialize_is_reader() |
257 | | { |
258 | | pthread_key_create(&is_reader, NULL); |
259 | | } |
260 | | |
METHOD(rwlock_t, read_lock, void,
	private_rwlock_t *this)
{
	uintptr_t reading;
	bool old;

	/* number of read locks this thread already holds, across ALL rwlocks */
	reading = (uintptr_t)pthread_getspecific(is_reader);
	profiler_start(&this->profile);
	this->mutex->lock(this->mutex);
	if (!this->writer && reading > 0)
	{
		/* directly allow threads that hold ANY read locks, to avoid a deadlock
		 * caused by preferring writers in the loop below */
	}
	else
	{
		/* disable cancellation so a cancelled thread can't abandon the wait
		 * and leave the lock's bookkeeping inconsistent */
		old = thread_cancelability(FALSE);
		while (this->writer || this->waiting_writers)
		{
			this->readers->wait(this->readers, this->mutex);
		}
		thread_cancelability(old);
	}
	this->reader_count++;
	profiler_end(&this->profile);
	this->mutex->unlock(this->mutex);
	/* bump the thread-local read lock counter */
	pthread_setspecific(is_reader, (void*)(reading + 1));
}
289 | | |
METHOD(rwlock_t, write_lock, void,
	private_rwlock_t *this)
{
	bool old;

	profiler_start(&this->profile);
	this->mutex->lock(this->mutex);
	/* announce ourselves so newly arriving readers block (writer preference) */
	this->waiting_writers++;
	/* disable cancellation so a cancelled thread can't leave waiting_writers
	 * incremented or miss restoring its state */
	old = thread_cancelability(FALSE);
	while (this->writer || this->reader_count)
	{
		this->writers->wait(this->writers, this->mutex);
	}
	thread_cancelability(old);
	this->waiting_writers--;
	this->writer = TRUE;
	profiler_end(&this->profile);
	this->mutex->unlock(this->mutex);
}
309 | | |
310 | | METHOD(rwlock_t, try_write_lock, bool, |
311 | | private_rwlock_t *this) |
312 | | { |
313 | | bool res = FALSE; |
314 | | this->mutex->lock(this->mutex); |
315 | | if (!this->writer && !this->reader_count) |
316 | | { |
317 | | res = this->writer = TRUE; |
318 | | } |
319 | | this->mutex->unlock(this->mutex); |
320 | | return res; |
321 | | } |
322 | | |
METHOD(rwlock_t, unlock, void,
	private_rwlock_t *this)
{
	this->mutex->lock(this->mutex);
	if (this->writer)
	{	/* releasing the exclusive write lock */
		this->writer = FALSE;
	}
	else
	{	/* releasing one read lock held by this thread */
		uintptr_t reading;

		this->reader_count--;
		/* keep the thread-local read lock counter in sync */
		reading = (uintptr_t)pthread_getspecific(is_reader);
		pthread_setspecific(is_reader, (void*)(reading - 1));
	}
	if (!this->reader_count)
	{
		if (this->waiting_writers)
		{	/* writers are preferred: wake exactly one of them */
			this->writers->signal(this->writers);
		}
		else
		{	/* no writer pending, let all blocked readers proceed */
			this->readers->broadcast(this->readers);
		}
	}
	this->mutex->unlock(this->mutex);
}
352 | | |
353 | | METHOD(rwlock_t, destroy, void, |
354 | | private_rwlock_t *this) |
355 | | { |
356 | | this->mutex->destroy(this->mutex); |
357 | | this->writers->destroy(this->writers); |
358 | | this->readers->destroy(this->readers); |
359 | | profiler_cleanup(&this->profile); |
360 | | free(this); |
361 | | } |
362 | | |
363 | | /* |
364 | | * see header file |
365 | | */ |
rwlock_t *rwlock_create(rwlock_type_t type)
{
	/* set up the process-wide thread-local read lock counter exactly once */
	pthread_once(&is_reader_initialized, initialize_is_reader);

	switch (type)
	{
		case RWLOCK_TYPE_DEFAULT:
		default:
		{
			private_rwlock_t *this;

			INIT(this,
				.public = {
					.read_lock = _read_lock,
					.write_lock = _write_lock,
					.try_write_lock = _try_write_lock,
					.unlock = _unlock,
					.destroy = _destroy,
				},
				.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
				.writers = condvar_create(CONDVAR_TYPE_DEFAULT),
				.readers = condvar_create(CONDVAR_TYPE_DEFAULT),
			);

			profiler_init(&this->profile);

			return &this->public;
		}
	}
}
396 | | |
397 | | #endif /* HAVE_PTHREAD_RWLOCK_INIT */ |
398 | | |
399 | | |
METHOD(rwlock_condvar_t, wait_, void,
	private_rwlock_condvar_t *this, rwlock_t *lock)
{
	/* at this point we have the write lock locked, to make signals more
	 * predictable we try to prevent other threads from signaling by acquiring
	 * the mutex while we still hold the write lock (this assumes they will
	 * hold the write lock themselves when signaling, which is not mandatory) */
	this->mutex->lock(this->mutex);
	/* unlock the rwlock and wait for a signal */
	lock->unlock(lock);
	/* if the calling thread enabled thread cancelability we want to replicate
	 * the behavior of the regular condvar, i.e. the lock will be held again
	 * before executing cleanup functions registered by the calling thread.
	 * Note the push order: the write lock handler is pushed first so it runs
	 * LAST, i.e. after the mutex has been released */
	thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
	thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
	this->condvar->wait(this->condvar, this->mutex);
	/* we release the mutex to allow other threads into the condvar (might even
	 * be required so we can acquire the lock again below) */
	thread_cleanup_pop(TRUE);
	/* finally we reacquire the lock we held previously */
	thread_cleanup_pop(TRUE);
}
422 | | |
METHOD(rwlock_condvar_t, timed_wait_abs, bool,
	private_rwlock_condvar_t *this, rwlock_t *lock, timeval_t time)
{
	bool timed_out;

	/* see wait() above for details on what is going on here */
	this->mutex->lock(this->mutex);
	lock->unlock(lock);
	thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
	thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
	/* returns TRUE if the absolute deadline passed without a signal */
	timed_out = this->condvar->timed_wait_abs(this->condvar, this->mutex, time);
	thread_cleanup_pop(TRUE);
	thread_cleanup_pop(TRUE);
	return timed_out;
}
438 | | |
439 | | METHOD(rwlock_condvar_t, timed_wait, bool, |
440 | | private_rwlock_condvar_t *this, rwlock_t *lock, u_int timeout) |
441 | 0 | { |
442 | 0 | timeval_t tv; |
443 | 0 | u_int s, ms; |
444 | |
|
445 | 0 | time_monotonic(&tv); |
446 | |
|
447 | 0 | s = timeout / 1000; |
448 | 0 | ms = timeout % 1000; |
449 | |
|
450 | 0 | tv.tv_sec += s; |
451 | 0 | timeval_add_ms(&tv, ms); |
452 | |
|
453 | 0 | return timed_wait_abs(this, lock, tv); |
454 | 0 | } |
455 | | |
456 | | METHOD(rwlock_condvar_t, signal_, void, |
457 | | private_rwlock_condvar_t *this) |
458 | 0 | { |
459 | 0 | this->mutex->lock(this->mutex); |
460 | 0 | this->condvar->signal(this->condvar); |
461 | 0 | this->mutex->unlock(this->mutex); |
462 | 0 | } |
463 | | |
464 | | METHOD(rwlock_condvar_t, broadcast, void, |
465 | | private_rwlock_condvar_t *this) |
466 | 0 | { |
467 | 0 | this->mutex->lock(this->mutex); |
468 | 0 | this->condvar->broadcast(this->condvar); |
469 | 0 | this->mutex->unlock(this->mutex); |
470 | 0 | } |
471 | | |
472 | | METHOD(rwlock_condvar_t, condvar_destroy, void, |
473 | | private_rwlock_condvar_t *this) |
474 | 0 | { |
475 | 0 | this->condvar->destroy(this->condvar); |
476 | 0 | this->mutex->destroy(this->mutex); |
477 | 0 | free(this); |
478 | 0 | } |
479 | | |
480 | | /* |
481 | | * see header file |
482 | | */ |
rwlock_condvar_t *rwlock_condvar_create()
{
	private_rwlock_condvar_t *this;

	INIT(this,
		.public = {
			.wait = _wait_,
			.timed_wait = _timed_wait,
			.timed_wait_abs = _timed_wait_abs,
			.signal = _signal_,
			.broadcast = _broadcast,
			.destroy = _condvar_destroy,
		},
		/* a regular mutex/condvar pair backs the rwlock-aware condvar */
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
	);
	return &this->public;
}