/src/openvswitch/lib/seq.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2013, 2014 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | |
19 | | #include "seq.h" |
20 | | |
21 | | #include <stdbool.h> |
22 | | |
23 | | #include "coverage.h" |
24 | | #include "hash.h" |
25 | | #include "openvswitch/hmap.h" |
26 | | #include "latch.h" |
27 | | #include "openvswitch/list.h" |
28 | | #include "ovs-thread.h" |
29 | | #include "openvswitch/poll-loop.h" |
30 | | |
31 | | COVERAGE_DEFINE(seq_change); |
32 | | |
/* A sequence number object.
 *
 * All fields are protected by the single global 'seq_mutex'; the bare
 * OVS_GUARDED annotations here are enforced by the mutex annotations on the
 * functions below. */
struct seq {
    uint64_t value OVS_GUARDED;      /* Current sequence number. */
    struct hmap waiters OVS_GUARDED; /* Contains 'struct seq_waiter's. */
};
38 | | |
/* A thread waiting on a particular seq.
 *
 * A given thread has at most one seq_waiter per seq, keyed by thread id in
 * 'seq->waiters' (see seq_wait__()).  All fields are protected by the global
 * 'seq_mutex'. */
struct seq_waiter {
    struct hmap_node hmap_node OVS_GUARDED; /* In 'seq->waiters'. */
    struct seq *seq OVS_GUARDED;            /* Seq being waited for. */
    unsigned int ovsthread_id OVS_GUARDED;  /* Key in 'waiters' hmap. */

    struct seq_thread *thread OVS_GUARDED;  /* Thread preparing to wait. */
    struct ovs_list list_node OVS_GUARDED;  /* In 'thread->waiters'. */

    uint64_t value OVS_GUARDED; /* seq->value we're waiting to change. */
};
50 | | |
/* A thread that might be waiting on one or more seqs.
 *
 * One per thread, created on demand by seq_thread_get() and stored in the
 * thread-specific data slot 'seq_thread_key'.  All fields are protected by
 * the global 'seq_mutex'. */
struct seq_thread {
    struct ovs_list waiters OVS_GUARDED; /* Contains 'struct seq_waiter's. */
    struct latch latch OVS_GUARDED;  /* Wakeup latch for this thread. */
    bool waiting OVS_GUARDED;        /* True if latch_wait() already called. */
};
57 | | |
/* Single global mutex protecting every 'struct seq', every waiter/thread
 * structure, and 'seq_next'. */
static struct ovs_mutex seq_mutex = OVS_MUTEX_INITIALIZER;

/* Next sequence number to hand out.  Monotonically increasing, shared by all
 * seq objects, so no value is ever reused across seqs within a run. */
static uint64_t seq_next OVS_GUARDED_BY(seq_mutex) = 1;

/* Thread-specific data key holding each thread's 'struct seq_thread'; its
 * destructor is seq_thread_exit() (registered in seq_init()). */
static pthread_key_t seq_thread_key;

static void seq_init(void);
static struct seq_thread *seq_thread_get(void) OVS_REQUIRES(seq_mutex);
static void seq_thread_exit(void *thread_) OVS_EXCLUDED(seq_mutex);
static void seq_thread_woke(struct seq_thread *) OVS_REQUIRES(seq_mutex);
static void seq_waiter_destroy(struct seq_waiter *) OVS_REQUIRES(seq_mutex);
static void seq_wake_waiters(struct seq *) OVS_REQUIRES(seq_mutex);
70 | | |
71 | | /* Creates and returns a new 'seq' object. */ |
72 | | struct seq * OVS_EXCLUDED(seq_mutex) |
73 | | seq_create(void) |
74 | 0 | { |
75 | 0 | struct seq *seq; |
76 | |
|
77 | 0 | seq_init(); |
78 | |
|
79 | 0 | seq = xmalloc(sizeof *seq); |
80 | |
|
81 | 0 | COVERAGE_INC(seq_change); |
82 | |
|
83 | 0 | ovs_mutex_lock(&seq_mutex); |
84 | 0 | seq->value = seq_next++; |
85 | 0 | hmap_init(&seq->waiters); |
86 | 0 | ovs_mutex_unlock(&seq_mutex); |
87 | |
|
88 | 0 | return seq; |
89 | 0 | } |
90 | | |
/* Destroys 'seq', waking up threads that were waiting on it, if any. */
void
seq_destroy(struct seq *seq)
     OVS_EXCLUDED(seq_mutex)
{
    ovs_mutex_lock(&seq_mutex);
    /* Wake (and unlink) all waiters before freeing, so no seq_waiter is left
     * pointing at freed memory. */
    seq_wake_waiters(seq);
    hmap_destroy(&seq->waiters);
    free(seq);
    ovs_mutex_unlock(&seq_mutex);
}
102 | | |
/* Attempts to acquire the global 'seq_mutex' without blocking.  Returns 0 if
 * the mutex was acquired (release it with seq_unlock()), otherwise nonzero
 * and the mutex is not held. */
int
seq_try_lock(void)
{
    return ovs_mutex_trylock(&seq_mutex);
}
108 | | |
/* Acquires the global 'seq_mutex', enabling use of the *_protected()
 * variants below.  Release with seq_unlock(). */
void
seq_lock(void)
    OVS_ACQUIRES(seq_mutex)
{
    ovs_mutex_lock(&seq_mutex);
}
115 | | |
/* Releases the global 'seq_mutex' acquired by seq_lock() or a successful
 * seq_try_lock(). */
void
seq_unlock(void)
    OVS_RELEASES(seq_mutex)
{
    ovs_mutex_unlock(&seq_mutex);
}
122 | | |
/* Increments 'seq''s sequence number, waking up any threads that are waiting
 * on 'seq'.  Caller must already hold 'seq_mutex' (see seq_lock()). */
void
seq_change_protected(struct seq *seq)
    OVS_REQUIRES(seq_mutex)
{
    COVERAGE_INC(seq_change);

    /* Take a fresh number from the shared counter so the new value differs
     * from any value a waiter could previously have observed. */
    seq->value = seq_next++;
    seq_wake_waiters(seq);
}
134 | | |
/* Increments 'seq''s sequence number, waking up any threads that are waiting
 * on 'seq'.  Convenience wrapper that takes 'seq_mutex' around
 * seq_change_protected(). */
void
seq_change(struct seq *seq)
    OVS_EXCLUDED(seq_mutex)
{
    ovs_mutex_lock(&seq_mutex);
    seq_change_protected(seq);
    ovs_mutex_unlock(&seq_mutex);
}
145 | | |
/* Returns 'seq''s current sequence number (which could change immediately).
 * Caller must already hold 'seq_mutex' (see seq_lock()).
 *
 * seq_read() and seq_wait() can be used together to yield a race-free wakeup
 * when an object changes, even without an ability to lock the object.  See
 * Usage in seq.h for details. */
uint64_t
seq_read_protected(const struct seq *seq)
    OVS_REQUIRES(seq_mutex)
{
    return seq->value;
}
157 | | |
158 | | /* Returns 'seq''s current sequence number (which could change immediately). |
159 | | * |
160 | | * seq_read() and seq_wait() can be used together to yield a race-free wakeup |
161 | | * when an object changes, even without an ability to lock the object. See |
162 | | * Usage in seq.h for details. */ |
163 | | uint64_t |
164 | | seq_read(const struct seq *seq) |
165 | | OVS_EXCLUDED(seq_mutex) |
166 | 0 | { |
167 | 0 | uint64_t value; |
168 | |
|
169 | 0 | ovs_mutex_lock(&seq_mutex); |
170 | 0 | value = seq_read_protected(seq); |
171 | 0 | ovs_mutex_unlock(&seq_mutex); |
172 | |
|
173 | 0 | return value; |
174 | 0 | } |
175 | | |
/* Registers the current thread to be woken when 'seq''s value changes away
 * from 'value'.  Called only from seq_wait_at(), which has already verified
 * seq->value == value under 'seq_mutex'.
 *
 * Each thread keeps at most one seq_waiter per seq, keyed by thread id in
 * 'seq->waiters', so repeated seq_wait() calls within one poll iteration are
 * cheap. */
static void
seq_wait__(struct seq *seq, uint64_t value, const char *where)
    OVS_REQUIRES(seq_mutex)
{
    unsigned int id = ovsthread_id_self();
    uint32_t hash = hash_int(id, 0);
    struct seq_waiter *waiter;

    /* Look for an existing waiter belonging to this thread. */
    HMAP_FOR_EACH_IN_BUCKET (waiter, hmap_node, hash, &seq->waiters) {
        if (waiter->ovsthread_id == id) {
            if (waiter->value != value) {
                /* The current value is different from the value we've already
                 * waited for, so schedule an immediate wakeup instead of
                 * waiting for a further change. */
                poll_immediate_wake_at(where);
            } else {
                /* Already waiting on 'value', nothing more to do. */
            }
            return;
        }
    }

    /* First wait on 'seq' by this thread: create a waiter and link it into
     * both 'seq->waiters' and this thread's waiter list. */
    waiter = xmalloc(sizeof *waiter);
    waiter->seq = seq;
    hmap_insert(&seq->waiters, &waiter->hmap_node, hash);
    waiter->ovsthread_id = id;
    waiter->value = value;
    waiter->thread = seq_thread_get();
    ovs_list_push_back(&waiter->thread->waiters, &waiter->list_node);

    /* Register the thread's wakeup latch with the poll loop at most once per
     * poll_block() cycle; seq_woke() clears 'waiting' afterward. */
    if (!waiter->thread->waiting) {
        latch_wait_at(&waiter->thread->latch, where);
        waiter->thread->waiting = true;
    }
}
210 | | |
211 | | /* Causes the following poll_block() to wake up when 'seq''s sequence number |
212 | | * changes from 'value'. (If 'seq''s sequence number isn't 'value', then |
213 | | * poll_block() won't block at all.) |
214 | | * |
215 | | * seq_read() and seq_wait() can be used together to yield a race-free wakeup |
216 | | * when an object changes, even without an ability to lock the object. See |
217 | | * Usage in seq.h for details. |
218 | | * |
219 | | * ('where' is used in debug logging. Commonly one would use seq_wait() to |
220 | | * automatically provide the caller's source file and line number for |
221 | | * 'where'.) */ |
222 | | void |
223 | | seq_wait_at(const struct seq *seq_, uint64_t value, const char *where) |
224 | | OVS_EXCLUDED(seq_mutex) |
225 | 0 | { |
226 | 0 | struct seq *seq = CONST_CAST(struct seq *, seq_); |
227 | |
|
228 | 0 | ovs_mutex_lock(&seq_mutex); |
229 | 0 | if (value == seq->value) { |
230 | 0 | seq_wait__(seq, value, where); |
231 | 0 | } else { |
232 | 0 | poll_immediate_wake_at(where); |
233 | 0 | } |
234 | 0 | ovs_mutex_unlock(&seq_mutex); |
235 | 0 | } |
236 | | |
237 | | /* Called by poll_block() just before it returns, this function destroys any |
238 | | * seq_waiter objects associated with the current thread. */ |
239 | | void |
240 | | seq_woke(void) |
241 | | OVS_EXCLUDED(seq_mutex) |
242 | 0 | { |
243 | 0 | struct seq_thread *thread; |
244 | |
|
245 | 0 | seq_init(); |
246 | |
|
247 | 0 | thread = pthread_getspecific(seq_thread_key); |
248 | 0 | if (thread) { |
249 | 0 | ovs_mutex_lock(&seq_mutex); |
250 | 0 | seq_thread_woke(thread); |
251 | 0 | thread->waiting = false; |
252 | 0 | ovs_mutex_unlock(&seq_mutex); |
253 | 0 | } |
254 | 0 | } |
255 | | |
/* One-time module initialization: creates the thread-specific data key whose
 * destructor, seq_thread_exit(), tears down a thread's waiters when the
 * thread terminates.  Safe to call from any thread, any number of times. */
static void
seq_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        xpthread_key_create(&seq_thread_key, seq_thread_exit);
        ovsthread_once_done(&once);
    }
}
266 | | |
267 | | static struct seq_thread * |
268 | | seq_thread_get(void) |
269 | | OVS_REQUIRES(seq_mutex) |
270 | 0 | { |
271 | 0 | struct seq_thread *thread = pthread_getspecific(seq_thread_key); |
272 | 0 | if (!thread) { |
273 | 0 | thread = xmalloc(sizeof *thread); |
274 | 0 | ovs_list_init(&thread->waiters); |
275 | 0 | latch_init(&thread->latch); |
276 | 0 | thread->waiting = false; |
277 | |
|
278 | 0 | xpthread_setspecific(seq_thread_key, thread); |
279 | 0 | } |
280 | 0 | return thread; |
281 | 0 | } |
282 | | |
283 | | static void |
284 | | seq_thread_exit(void *thread_) |
285 | | OVS_EXCLUDED(seq_mutex) |
286 | 0 | { |
287 | 0 | struct seq_thread *thread = thread_; |
288 | |
|
289 | 0 | ovs_mutex_lock(&seq_mutex); |
290 | 0 | seq_thread_woke(thread); |
291 | 0 | latch_destroy(&thread->latch); |
292 | 0 | free(thread); |
293 | 0 | ovs_mutex_unlock(&seq_mutex); |
294 | 0 | } |
295 | | |
296 | | static void |
297 | | seq_thread_woke(struct seq_thread *thread) |
298 | | OVS_REQUIRES(seq_mutex) |
299 | 0 | { |
300 | 0 | struct seq_waiter *waiter; |
301 | |
|
302 | 0 | LIST_FOR_EACH_SAFE (waiter, list_node, &thread->waiters) { |
303 | 0 | ovs_assert(waiter->thread == thread); |
304 | 0 | seq_waiter_destroy(waiter); |
305 | 0 | } |
306 | 0 | latch_poll(&thread->latch); |
307 | 0 | } |
308 | | |
309 | | static void |
310 | | seq_waiter_destroy(struct seq_waiter *waiter) |
311 | | OVS_REQUIRES(seq_mutex) |
312 | 0 | { |
313 | 0 | hmap_remove(&waiter->seq->waiters, &waiter->hmap_node); |
314 | 0 | ovs_list_remove(&waiter->list_node); |
315 | 0 | free(waiter); |
316 | 0 | } |
317 | | |
318 | | static void |
319 | | seq_wake_waiters(struct seq *seq) |
320 | | OVS_REQUIRES(seq_mutex) |
321 | 0 | { |
322 | 0 | struct seq_waiter *waiter; |
323 | |
|
324 | 0 | HMAP_FOR_EACH_SAFE (waiter, hmap_node, &seq->waiters) { |
325 | 0 | latch_set(&waiter->thread->latch); |
326 | 0 | seq_waiter_destroy(waiter); |
327 | 0 | } |
328 | 0 | } |