/src/openvswitch/lib/fat-rwlock.c
/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "fat-rwlock.h"

#include <errno.h>

#include "openvswitch/hmap.h"
#include "openvswitch/list.h"
#include "ovs-thread.h"
#include "random.h"

struct fat_rwlock_slot {
    /* Membership in rwlock's list of "struct fat_rwlock_slot"s.
     *
     * fat_rwlock_destroy() deletes the thread-specific key before it walks
     * this list, so slot_destructor() cannot run concurrently with slot
     * teardown. */
    struct ovs_list list_node;  /* In struct fat_rwlock's 'threads' list. */
    struct fat_rwlock *rwlock;  /* Owner. */

    /* Mutex.
     *
     * A thread holding the read-lock holds its own mutex.
     *
     * A thread holding the write-lock holds every thread's mutex, plus
     * 'rwlock->mutex'. */
    struct ovs_mutex mutex;

    /* This thread's locking status for 'rwlock':
     *
     *     - 0: This thread does not have any lock on 'rwlock'.  This thread
     *       does not have 'mutex' locked.
     *
     *     - 1: This thread has a read-lock on 'rwlock' and holds 'mutex'.
     *
     *     - 2...UINT_MAX-1: This thread has recursively taken the read-lock
     *       on 'rwlock' to the level of 'depth'.  This thread holds 'mutex'.
     *
     *     - UINT_MAX: This thread has the write-lock on 'rwlock' and holds
     *       'mutex' (plus the 'mutex' of all of 'rwlock''s other slots).
     *
     * Accessed only by the slot's own thread, so no synchronization is
     * needed. */
    unsigned int depth;
};
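
/* Usage sketch (illustrative only, not exercised by this file): how a single
 * thread's 'depth' evolves across the public API defined below.  Error
 * handling and real work are omitted.
 *
 *     struct fat_rwlock rwlock;
 *
 *     fat_rwlock_init(&rwlock);
 *
 *     fat_rwlock_rdlock(&rwlock);    // depth 0 -> 1: slot mutex acquired
 *     fat_rwlock_rdlock(&rwlock);    // depth 1 -> 2: recursive, no locking
 *     fat_rwlock_unlock(&rwlock);    // depth 2 -> 1
 *     fat_rwlock_unlock(&rwlock);    // depth 1 -> 0: slot mutex released
 *
 *     fat_rwlock_wrlock(&rwlock);    // depth 0 -> UINT_MAX: all mutexes
 *     fat_rwlock_unlock(&rwlock);    // depth UINT_MAX -> 0
 *
 *     fat_rwlock_destroy(&rwlock);
 */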

/* Removes 'slot' from its owner's list of slots and frees its memory.
 *
 * The caller must hold 'slot->rwlock->mutex' or otherwise ensure that no
 * other thread can reach 'slot', and 'slot' must not hold any lock, that is,
 * 'slot->depth' must be 0. */
static void
free_slot(struct fat_rwlock_slot *slot)
{
    if (slot->depth) {
        abort();
    }

    ovs_list_remove(&slot->list_node);
    free_cacheline(slot);
}

/* Thread-specific data destructor for a slot, invoked when a thread exits
 * while it still has a slot registered for some fat_rwlock. */
static void
slot_destructor(void *slot_)
{
    struct fat_rwlock_slot *slot = slot_;
    struct fat_rwlock *rwlock = slot->rwlock;

    ovs_mutex_lock(&rwlock->mutex);
    free_slot(slot);
    ovs_mutex_unlock(&rwlock->mutex);
}

/* Initializes 'rwlock' as a new fat_rwlock. */
void
fat_rwlock_init(struct fat_rwlock *rwlock)
{
    ovsthread_key_create(&rwlock->key, slot_destructor);
    ovs_mutex_init(&rwlock->mutex);
    ovs_mutex_lock(&rwlock->mutex);
    ovs_list_init(&rwlock->threads);
    ovs_mutex_unlock(&rwlock->mutex);
}

/* Destroys 'rwlock', which must not be locked or otherwise in use by any
 * thread. */
void
fat_rwlock_destroy(struct fat_rwlock *rwlock)
{
    struct fat_rwlock_slot *slot;

    /* Order is important here.  By destroying the thread-specific data
     * first, before we destroy the slots, we ensure that the thread-specific
     * data destructor can't race with our loop below: once the key is gone,
     * no exiting thread will call slot_destructor() again. */
    ovsthread_key_delete(rwlock->key);

    LIST_FOR_EACH_SAFE (slot, list_node, &rwlock->threads) {
        free_slot(slot);
    }
    ovs_mutex_destroy(&rwlock->mutex);
}

/* Returns this thread's slot for 'rwlock', creating and registering a new
 * slot on first use.  Never returns NULL, but may block on 'rwlock->mutex'
 * while registering a new slot.  Each slot is cache-line aligned, so threads
 * that only ever read do not share cache lines with one another. */
static struct fat_rwlock_slot *
fat_rwlock_get_slot__(struct fat_rwlock *rwlock)
{
    struct fat_rwlock_slot *slot;

    /* Fast path. */
    slot = ovsthread_getspecific(rwlock->key);
    if (slot) {
        return slot;
    }

    /* Slow path: create a new slot for 'rwlock' in this thread. */

    slot = xmalloc_cacheline(sizeof *slot);
    slot->rwlock = rwlock;
    ovs_mutex_init(&slot->mutex);
    slot->depth = 0;

    ovs_mutex_lock(&rwlock->mutex);
    ovs_list_push_back(&rwlock->threads, &slot->list_node);
    ovs_mutex_unlock(&rwlock->mutex);

    ovsthread_setspecific(rwlock->key, slot);

    return slot;
}

/* Locks 'rwlock' for reading.  The read-lock is recursive: it may be
 * acquired any number of times by a single thread (which must then release
 * it the same number of times for it to truly be released). */
void
fat_rwlock_rdlock(const struct fat_rwlock *rwlock_)
    OVS_ACQ_RDLOCK(rwlock_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);

    switch (this->depth) {
    case UINT_MAX:
        /* This thread already holds the write-lock. */
        abort();

    case 0:
        ovs_mutex_lock(&this->mutex);
        /* fall through */
    default:
        this->depth++;
        break;
    }
}
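
/* Concurrency sketch (hypothetical threads): any number of threads may hold
 * the read-lock at the same time.  Each reader locks only its own slot's
 * mutex, so readers never contend with one another:
 *
 *     // Thread A                        // Thread B
 *     fat_rwlock_rdlock(&rwlock);        fat_rwlock_rdlock(&rwlock);
 *     ...read shared state...            ...read shared state...
 *     fat_rwlock_unlock(&rwlock);        fat_rwlock_unlock(&rwlock);
 */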

/* Like fat_rwlock_get_slot__(), but never blocks: if this thread has no slot
 * yet and 'rwlock->mutex' cannot be taken immediately, returns NULL instead
 * of registering a new slot. */
static struct fat_rwlock_slot *
fat_rwlock_try_get_slot__(struct fat_rwlock *rwlock)
{
    struct fat_rwlock_slot *slot;

    /* Fast path. */
    slot = ovsthread_getspecific(rwlock->key);
    if (slot) {
        return slot;
    }

    /* Slow path: create a new slot for 'rwlock' in this thread. */

    if (!ovs_mutex_trylock(&rwlock->mutex)) {
        slot = xmalloc_cacheline(sizeof *slot);
        slot->rwlock = rwlock;
        ovs_mutex_init(&slot->mutex);
        slot->depth = 0;

        ovs_list_push_back(&rwlock->threads, &slot->list_node);
        ovs_mutex_unlock(&rwlock->mutex);
        ovsthread_setspecific(rwlock->key, slot);
    }

    return slot;
}

/* Tries to lock 'rwlock' for reading.  If successful, returns 0.  If taking
 * the lock would require blocking, returns EBUSY (without blocking). */
int
fat_rwlock_tryrdlock(const struct fat_rwlock *rwlock_)
    OVS_TRY_RDLOCK(0, rwlock_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
    struct fat_rwlock_slot *this = fat_rwlock_try_get_slot__(rwlock);
    int error;

    if (!this) {
        return EBUSY;
    }

    switch (this->depth) {
    case UINT_MAX:
        /* This thread already holds the write-lock. */
        return EBUSY;

    case 0:
        error = ovs_mutex_trylock(&this->mutex);
        if (error) {
            return error;
        }
        /* fall through */
    default:
        this->depth++;
        break;
    }

    return 0;
}
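
/* Usage sketch (hypothetical caller): a nonblocking read attempt with a
 * blocking fallback.  A nonzero return takes no lock; it is EBUSY, or
 * another error code from the underlying trylock:
 *
 *     if (fat_rwlock_tryrdlock(&rwlock)) {
 *         fat_rwlock_rdlock(&rwlock);    // fall back to blocking
 *     }
 *     ...read shared state...
 *     fat_rwlock_unlock(&rwlock);
 */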

/* Locks 'rwlock' for writing.
 *
 * The write-lock is not recursive.  Taking it acquires 'rwlock->mutex'
 * (which serializes writers) and then every slot's 'mutex', which excludes
 * all current and future readers. */
void
fat_rwlock_wrlock(const struct fat_rwlock *rwlock_)
    OVS_ACQ_WRLOCK(rwlock_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
    struct fat_rwlock_slot *slot;

    ovs_assert(!this->depth);
    this->depth = UINT_MAX;

    ovs_mutex_lock(&rwlock->mutex);
    LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
        ovs_mutex_lock(&slot->mutex);
    }
}
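
/* Writer sketch: fat_rwlock_wrlock() returns only once every reader has
 * released, because each active reader holds its slot's mutex for as long
 * as it holds the read-lock.  Write acquisition therefore costs one mutex
 * per thread that has ever used 'rwlock' and is still registered:
 *
 *     fat_rwlock_wrlock(&rwlock);    // waits for all readers to drain
 *     ...modify shared state...
 *     fat_rwlock_unlock(&rwlock);    // releases every slot's mutex
 */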

/* Unlocks 'rwlock', which the current thread must have locked for reading or
 * for writing.  If the read-lock has been taken recursively, it must be
 * released the same number of times to be truly released. */
void
fat_rwlock_unlock(const struct fat_rwlock *rwlock_)
    OVS_RELEASES(rwlock_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
    struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock);
    struct fat_rwlock_slot *slot;

    switch (this->depth) {
    case UINT_MAX:
        /* This thread holds the write-lock: release every slot's mutex,
         * then 'rwlock->mutex'. */
        LIST_FOR_EACH (slot, list_node, &rwlock->threads) {
            ovs_mutex_unlock(&slot->mutex);
        }
        ovs_mutex_unlock(&rwlock->mutex);
        this->depth = 0;
        break;

    case 0:
        /* This thread doesn't hold any lock. */
        abort();

    case 1:
        ovs_mutex_unlock(&this->mutex);
        /* fall through */
    default:
        this->depth--;
        break;
    }
}