/src/openvswitch/lib/seq.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2013, 2014 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | |
19 | | #include "seq.h" |
20 | | |
21 | | #include <stdbool.h> |
22 | | |
23 | | #include "coverage.h" |
24 | | #include "hash.h" |
25 | | #include "openvswitch/hmap.h" |
26 | | #include "latch.h" |
27 | | #include "openvswitch/list.h" |
28 | | #include "ovs-thread.h" |
29 | | #include "openvswitch/poll-loop.h" |
30 | | |
COVERAGE_DEFINE(seq_change);

/* A sequence number object.
 *
 * 'value' is read with an atomic acquire load (see seq_read()), so it may be
 * sampled without holding 'seq_mutex'; 'waiters' is only touched under
 * 'seq_mutex'. */
struct seq {
    atomic_uint64_t value;
    struct hmap waiters OVS_GUARDED; /* Contains 'struct seq_waiter's. */
};
38 | | |
/* A thread waiting on a particular seq.
 *
 * Each waiter is linked twice: into its seq's 'waiters' hmap (keyed on the
 * waiting thread's id) and into its thread's 'waiters' list.
 * seq_waiter_destroy() unlinks both. */
struct seq_waiter {
    struct hmap_node hmap_node OVS_GUARDED; /* In 'seq->waiters'. */
    struct seq *seq OVS_GUARDED;            /* Seq being waited for. */
    unsigned int ovsthread_id OVS_GUARDED;  /* Key in 'waiters' hmap. */

    struct seq_thread *thread OVS_GUARDED;  /* Thread preparing to wait. */
    struct ovs_list list_node OVS_GUARDED;  /* In 'thread->waiters'. */

    uint64_t value OVS_GUARDED; /* seq->value we're waiting to change. */
};
50 | | |
/* A thread that might be waiting on one or more seqs.
 *
 * Stored in per-thread storage under 'seq_thread_key'; created lazily by
 * seq_thread_get() and freed by seq_thread_exit() when the thread dies. */
struct seq_thread {
    struct ovs_list waiters OVS_GUARDED; /* Contains 'struct seq_waiter's. */
    struct latch latch OVS_GUARDED;  /* Wakeup latch for this thread. */
    bool waiting OVS_GUARDED;        /* True if latch_wait() already called;
                                      * reset by seq_woke() after
                                      * poll_block() returns. */
};
57 | | |
/* Guards 'seq_next' and every OVS_GUARDED member above (the waiter hmaps,
 * lists, and per-thread state). */
static struct ovs_mutex seq_mutex = OVS_MUTEX_INITIALIZER;

/* Source of sequence numbers: each seq_create() and seq_change*() consumes
 * the next value, so no two seqs (or updates) ever share one. */
static uint64_t seq_next OVS_GUARDED_BY(seq_mutex) = 1;

/* Per-thread 'struct seq_thread *'; its destructor is seq_thread_exit(). */
static pthread_key_t seq_thread_key;

static void seq_init(void);
static struct seq_thread *seq_thread_get(void) OVS_REQUIRES(seq_mutex);
static void seq_thread_exit(void *thread_) OVS_EXCLUDED(seq_mutex);
static void seq_thread_woke(struct seq_thread *) OVS_REQUIRES(seq_mutex);
static void seq_waiter_destroy(struct seq_waiter *) OVS_REQUIRES(seq_mutex);
static void seq_wake_waiters(struct seq *) OVS_REQUIRES(seq_mutex);
70 | | |
71 | | /* Creates and returns a new 'seq' object. */ |
72 | | struct seq * OVS_EXCLUDED(seq_mutex) |
73 | | seq_create(void) |
74 | 0 | { |
75 | 0 | uint64_t seq_value; |
76 | 0 | struct seq *seq; |
77 | |
|
78 | 0 | seq_init(); |
79 | |
|
80 | 0 | seq = xmalloc(sizeof *seq); |
81 | |
|
82 | 0 | COVERAGE_INC(seq_change); |
83 | |
|
84 | 0 | ovs_mutex_lock(&seq_mutex); |
85 | 0 | seq_value = seq_next++; |
86 | 0 | atomic_store_relaxed(&seq->value, seq_value); |
87 | 0 | hmap_init(&seq->waiters); |
88 | 0 | ovs_mutex_unlock(&seq_mutex); |
89 | |
|
90 | 0 | return seq; |
91 | 0 | } |
92 | | |
/* Destroys 'seq', waking up threads that were waiting on it, if any. */
void
seq_destroy(struct seq *seq)
     OVS_EXCLUDED(seq_mutex)
{
    ovs_mutex_lock(&seq_mutex);
    /* Wake and unlink all waiters first so none is left pointing at the
     * about-to-be-freed 'seq'. */
    seq_wake_waiters(seq);
    hmap_destroy(&seq->waiters);
    free(seq);
    ovs_mutex_unlock(&seq_mutex);
}
104 | | |
/* Tries to acquire the global seq mutex without blocking.  Returns 0 on
 * success; on failure, returns ovs_mutex_trylock()'s error value. */
int
seq_try_lock(void)
{
    return ovs_mutex_trylock(&seq_mutex);
}
110 | | |
/* Acquires the global seq mutex, e.g. so the caller may use
 * seq_change_protected().  Pair with seq_unlock(). */
void
seq_lock(void)
    OVS_ACQUIRES(seq_mutex)
{
    ovs_mutex_lock(&seq_mutex);
}
117 | | |
/* Releases the global seq mutex acquired by seq_lock() or a successful
 * seq_try_lock(). */
void
seq_unlock(void)
    OVS_RELEASES(seq_mutex)
{
    ovs_mutex_unlock(&seq_mutex);
}
124 | | |
125 | | /* Increments 'seq''s sequence number, waking up any threads that are waiting |
126 | | * on 'seq'. */ |
127 | | void |
128 | | seq_change_protected(struct seq *seq) |
129 | | OVS_REQUIRES(seq_mutex) |
130 | 0 | { |
131 | 0 | uint64_t seq_value = seq_next++; |
132 | |
|
133 | 0 | COVERAGE_INC(seq_change); |
134 | |
|
135 | 0 | atomic_store_explicit(&seq->value, seq_value, memory_order_release); |
136 | 0 | seq_wake_waiters(seq); |
137 | 0 | } |
138 | | |
/* Increments 'seq''s sequence number, waking up any threads that are waiting
 * on 'seq'. */
void
seq_change(struct seq *seq)
    OVS_EXCLUDED(seq_mutex)
{
    /* Convenience wrapper: takes 'seq_mutex' around seq_change_protected(). */
    ovs_mutex_lock(&seq_mutex);
    seq_change_protected(seq);
    ovs_mutex_unlock(&seq_mutex);
}
149 | | |
/* Returns 'seq''s current sequence number (which could change immediately).
 *
 * seq_read() and seq_wait() can be used together to yield a race-free wakeup
 * when an object changes, even without an ability to lock the object.  See
 * Usage in seq.h for details.
 *
 * Lock-free: the acquire load pairs with the release store in
 * seq_change_protected(), so 'seq_mutex' is not needed here. */
uint64_t
seq_read(const struct seq *seq)
{
    uint64_t value;

    /* Note that the odd CONST_CAST() is here to keep sparse happy. */
    atomic_read_explicit(&CONST_CAST(struct seq *, seq)->value, &value,
                         memory_order_acquire);
    return value;
}
165 | | |
/* poll_immediate_wake_at() must not be called while holding seq_mutex
 * in order to avoid potential deadlock with time_init() that calls
 * seq_create() if the timeval module is not initialized yet. */
void poll_immediate_wake_at(const char *where) OVS_EXCLUDED(seq_mutex);

/* Registers the running thread to be woken when 'seq''s value changes from
 * 'value', then releases 'seq_mutex'.
 *
 * If this thread already has a waiter on 'seq' for a different value, the
 * value must have changed since that wait was registered, so an immediate
 * wakeup is scheduled instead of a new wait. */
static void
seq_wait__(struct seq *seq, uint64_t value, const char *where)
    OVS_RELEASES(seq_mutex)
{
    unsigned int id = ovsthread_id_self();
    uint32_t hash = hash_int(id, 0);
    struct seq_waiter *waiter;

    /* At most one waiter per thread per seq: search the bucket for an
     * existing waiter keyed on this thread's id. */
    HMAP_FOR_EACH_IN_BUCKET (waiter, hmap_node, hash, &seq->waiters) {
        if (waiter->ovsthread_id == id) {
            if (waiter->value != value) {
                /* The current value is different from the value we've already
                 * waited for, */
                ovs_mutex_unlock(&seq_mutex);
                poll_immediate_wake_at(where);
            } else {
                /* Already waiting on 'value', nothing more to do. */
                ovs_mutex_unlock(&seq_mutex);
            }
            return;
        }
    }

    /* No existing waiter: create one and link it into both the seq's hmap
     * and this thread's list. */
    waiter = xmalloc(sizeof *waiter);
    waiter->seq = seq;
    hmap_insert(&seq->waiters, &waiter->hmap_node, hash);
    waiter->ovsthread_id = id;
    waiter->value = value;
    waiter->thread = seq_thread_get();
    ovs_list_push_back(&waiter->thread->waiters, &waiter->list_node);

    /* Arrange for poll_block() to sleep on this thread's latch, but only
     * once per poll iteration ('waiting' is reset by seq_woke()). */
    if (!waiter->thread->waiting) {
        latch_wait_at(&waiter->thread->latch, where);
        waiter->thread->waiting = true;
    }
    ovs_mutex_unlock(&seq_mutex);
}
208 | | |
209 | | /* Causes the following poll_block() to wake up when 'seq''s sequence number |
210 | | * changes from 'value'. (If 'seq''s sequence number isn't 'value', then |
211 | | * poll_block() won't block at all.) |
212 | | * |
213 | | * seq_read() and seq_wait() can be used together to yield a race-free wakeup |
214 | | * when an object changes, even without an ability to lock the object. See |
215 | | * Usage in seq.h for details. |
216 | | * |
217 | | * ('where' is used in debug logging. Commonly one would use seq_wait() to |
218 | | * automatically provide the caller's source file and line number for |
219 | | * 'where'.) */ |
220 | | void |
221 | | seq_wait_at(const struct seq *seq_, uint64_t value, const char *where) |
222 | | OVS_EXCLUDED(seq_mutex) |
223 | 0 | { |
224 | 0 | struct seq *seq = CONST_CAST(struct seq *, seq_); |
225 | |
|
226 | 0 | ovs_mutex_lock(&seq_mutex); |
227 | 0 | if (value == seq_read(seq_)) { |
228 | 0 | seq_wait__(seq, value, where); |
229 | 0 | } else { |
230 | 0 | ovs_mutex_unlock(&seq_mutex); |
231 | 0 | poll_immediate_wake_at(where); |
232 | 0 | } |
233 | 0 | } |
234 | | |
235 | | /* Called by poll_block() just before it returns, this function destroys any |
236 | | * seq_waiter objects associated with the current thread. */ |
237 | | void |
238 | | seq_woke(void) |
239 | | OVS_EXCLUDED(seq_mutex) |
240 | 0 | { |
241 | 0 | struct seq_thread *thread; |
242 | |
|
243 | 0 | seq_init(); |
244 | |
|
245 | 0 | thread = pthread_getspecific(seq_thread_key); |
246 | 0 | if (thread) { |
247 | 0 | ovs_mutex_lock(&seq_mutex); |
248 | 0 | seq_thread_woke(thread); |
249 | 0 | thread->waiting = false; |
250 | 0 | ovs_mutex_unlock(&seq_mutex); |
251 | 0 | } |
252 | 0 | } |
253 | | |
/* One-time module initialization: creates the per-thread key whose
 * destructor (seq_thread_exit()) frees a thread's seq state when it exits.
 * Safe to call from any thread, any number of times. */
static void
seq_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        xpthread_key_create(&seq_thread_key, seq_thread_exit);
        ovsthread_once_done(&once);
    }
}
264 | | |
265 | | static struct seq_thread * |
266 | | seq_thread_get(void) |
267 | | OVS_REQUIRES(seq_mutex) |
268 | 0 | { |
269 | 0 | struct seq_thread *thread = pthread_getspecific(seq_thread_key); |
270 | 0 | if (!thread) { |
271 | 0 | thread = xmalloc(sizeof *thread); |
272 | 0 | ovs_list_init(&thread->waiters); |
273 | 0 | latch_init(&thread->latch); |
274 | 0 | thread->waiting = false; |
275 | |
|
276 | 0 | xpthread_setspecific(seq_thread_key, thread); |
277 | 0 | } |
278 | 0 | return thread; |
279 | 0 | } |
280 | | |
/* Destructor for 'seq_thread_key', run when a thread with seq state exits:
 * destroys any waiters still registered for the thread, then frees its
 * per-thread state. */
static void
seq_thread_exit(void *thread_)
    OVS_EXCLUDED(seq_mutex)
{
    struct seq_thread *thread = thread_;

    ovs_mutex_lock(&seq_mutex);
    /* Unlink all waiters first; they hold pointers into 'thread'. */
    seq_thread_woke(thread);
    latch_destroy(&thread->latch);
    free(thread);
    ovs_mutex_unlock(&seq_mutex);
}
293 | | |
/* Destroys every waiter registered by 'thread' and drains its latch,
 * leaving the thread ready to register fresh waits. */
static void
seq_thread_woke(struct seq_thread *thread)
    OVS_REQUIRES(seq_mutex)
{
    struct seq_waiter *waiter;

    /* _SAFE because seq_waiter_destroy() unlinks 'waiter' from this list. */
    LIST_FOR_EACH_SAFE (waiter, list_node, &thread->waiters) {
        ovs_assert(waiter->thread == thread);
        seq_waiter_destroy(waiter);
    }
    latch_poll(&thread->latch);
}
306 | | |
/* Unlinks 'waiter' from both the seq's 'waiters' hmap and the owning
 * thread's 'waiters' list, then frees it. */
static void
seq_waiter_destroy(struct seq_waiter *waiter)
    OVS_REQUIRES(seq_mutex)
{
    hmap_remove(&waiter->seq->waiters, &waiter->hmap_node);
    ovs_list_remove(&waiter->list_node);
    free(waiter);
}
315 | | |
/* Wakes every thread with a waiter on 'seq' and destroys those waiters,
 * leaving 'seq->waiters' empty. */
static void
seq_wake_waiters(struct seq *seq)
    OVS_REQUIRES(seq_mutex)
{
    struct seq_waiter *waiter;

    /* _SAFE because seq_waiter_destroy() removes 'waiter' from the hmap.
     * latch_set() must come first: seq_waiter_destroy() frees 'waiter'. */
    HMAP_FOR_EACH_SAFE (waiter, hmap_node, &seq->waiters) {
        latch_set(&waiter->thread->latch);
        seq_waiter_destroy(waiter);
    }
}