Line | Count | Source |
1 | | // SPDX-License-Identifier: ISC |
2 | | /* |
3 | | * Copyright (c) 2016-2018 David Lamparter, for NetDEF, Inc. |
4 | | */ |
5 | | |
6 | | #ifdef HAVE_CONFIG_H |
7 | | #include "config.h" |
8 | | #endif |
9 | | |
10 | | #include <assert.h> |
11 | | |
12 | | #include "atomlist.h" |
13 | | |
/* insert "item" at the head of list "h".
 *
 * lock-free: relies only on the CAS loop on h->first.  h->count is
 * maintained with relaxed ordering, so it is only an approximate count
 * while concurrent modifications are in flight.
 */
void atomlist_add_head(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* updating ->last is possible here, but makes the code considerably
	 * more complicated... let's not.
	 */
	prevval = ATOMPTR_NULL;
	item->next = ATOMPTR_NULL;

	/* head-insert atomically
	 * release barrier: item + item->next writes must be completed
	 *
	 * on CAS failure, prevval has been reloaded with the actual current
	 * head; store it as our successor (relaxed is enough - the release
	 * on the next CAS attempt publishes it) and retry.
	 */
	while (!atomic_compare_exchange_weak_explicit(&h->first, &prevval, i,
			memory_order_release, memory_order_relaxed))
		atomic_store_explicit(&item->next, prevval,
				memory_order_relaxed);
}
35 | | |
/* append "item" at the tail of list "h".
 *
 * h->last is only a hint to the tail, not authoritative: we grab it with
 * an atomic exchange (installing ourselves as the new hint) and then walk
 * forward from there to find the real end of the list.  A NULL hint, or a
 * hint whose tail turns out to be mid-deletion, restarts the walk from
 * h->first.
 */
void atomlist_add_tail(struct atomlist_head *h, struct atomlist_item *item)
{
	atomptr_t prevval = ATOMPTR_NULL;
	atomptr_t i = atomptr_i(item);
	atomptr_t hint;
	struct atomlist_item *prevptr;
	_Atomic atomptr_t *prev;

	item->next = ATOMPTR_NULL;

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);

	/* place new item into ->last
	 * release: item writes completed; acquire: DD barrier on hint
	 */
	hint = atomic_exchange_explicit(&h->last, i, memory_order_acq_rel);

	while (1) {
		/* start from the hint if we have one, else from the head */
		if (atomptr_p(hint) == NULL)
			prev = &h->first;
		else
			prev = &atomlist_itemp(hint)->next;

		/* walk forward to the current end of the list */
		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);
			prevptr = atomlist_itemp(prevval);
			if (prevptr == NULL)
				break;

			prev = &prevptr->next;
		} while (prevptr);

		/* last item is being deleted - start over */
		if (atomptr_l(prevval)) {
			hint = ATOMPTR_NULL;
			continue;
		}

		/* no barrier - item->next is NULL and was so in xchg above */
		if (!atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_consume,
				memory_order_consume)) {
			/* CAS raced against another append/delete; prevval
			 * was reloaded with the new value - use it as the
			 * next walk hint.
			 */
			hint = prevval;
			continue;
		}
		break;
	}
}
85 | | |
/* common unlink step for atomlist_del_hint() and atomlist_pop().
 *
 * "item" must already be delete-marked by the caller (ATOMPTR_LOCK set on
 * its ->next); "next" is item's successor value from before the mark.
 * "hint" optionally points at a predecessor's ->next field to start the
 * search from; NULL means start at h->first.
 */
static void atomlist_del_core(struct atomlist_head *h,
		struct atomlist_item *item,
		_Atomic atomptr_t *hint,
		atomptr_t next)
{
	_Atomic atomptr_t *prev = hint ? hint : &h->first, *upd;
	atomptr_t prevval, updval;
	struct atomlist_item *prevptr;

	/* drop us off "last" if needed. no r/w to barrier. */
	prevval = atomptr_i(item);
	atomic_compare_exchange_strong_explicit(&h->last, &prevval,
			ATOMPTR_NULL,
			memory_order_relaxed, memory_order_relaxed);

	atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed);

	/* the following code should be identical (except sort<>list) to
	 * atomsort_del_core()
	 */
	while (1) {
		upd = NULL;
		updval = ATOMPTR_LOCK;

		do {
			prevval = atomic_load_explicit(prev,
					memory_order_consume);

			/* track the beginning of a chain of deleted items
			 * this is necessary to make this lock-free; we can
			 * complete deletions started by other threads.
			 */
			if (!atomptr_l(prevval)) {
				updval = prevval;
				upd = prev;
			}

			prevptr = atomlist_itemp(prevval);
			if (prevptr == item)
				break;

			/* NOTE(review): when prevptr is NULL this computes
			 * &NULL->next before the loop condition stops us;
			 * the address is never dereferenced, but it is
			 * technically UB - consider breaking on NULL first.
			 */
			prev = &prevptr->next;
		} while (prevptr);

		if (prevptr != item)
			/* another thread completed our deletion */
			return;

		if (!upd || atomptr_l(updval)) {
			/* failed to find non-deleted predecessor...
			 * have to try again
			 */
			prev = &h->first;
			continue;
		}

		if (!atomic_compare_exchange_strong_explicit(upd, &updval,
				next, memory_order_consume,
				memory_order_consume)) {
			/* prev doesn't point to item anymore, something
			 * was inserted. continue at same position forward.
			 */
			continue;
		}
		break;
	}
}
153 | | |
154 | | void atomlist_del_hint(struct atomlist_head *h, struct atomlist_item *item, |
155 | | _Atomic atomptr_t *hint) |
156 | 0 | { |
157 | 0 | atomptr_t next; |
158 | | |
159 | | /* mark ourselves in-delete - full barrier */ |
160 | 0 | next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK, |
161 | 0 | memory_order_acquire); |
162 | 0 | assert(!atomptr_l(next)); /* delete race on same item */ |
163 | |
|
164 | 0 | atomlist_del_core(h, item, hint, next); |
165 | 0 | } |
166 | | |
/* remove and return the first item of "h", or NULL if the list is empty.
 * delete-marks the head item first, then unlinks it with
 * atomlist_del_core().
 */
struct atomlist_item *atomlist_pop(struct atomlist_head *h)
{
	struct atomlist_item *item;
	atomptr_t next;

	/* grab head of the list.  No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomlist_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomlist_del_core(h, item, &h->first, next);
	return item;
}
196 | | |
/* insert "item" into sorted list "h", ordered by "cmpfn".
 *
 * returns NULL on success, or - without inserting - the pre-existing item
 * that compares equal (cmpfn == 0); i.e. duplicates are rejected.
 */
struct atomsort_item *atomsort_add(struct atomsort_head *h,
		struct atomsort_item *item, int (*cmpfn)(
			const struct atomsort_item *,
			const struct atomsort_item *))
{
	_Atomic atomptr_t *prev;
	atomptr_t prevval;
	atomptr_t i = atomptr_i(item);
	struct atomsort_item *previtem;
	int cmpval;

	do {
		prev = &h->first;

		/* walk until we hit the end of the list or the first item
		 * that sorts after us
		 */
		do {
			prevval = atomic_load_explicit(prev,
					memory_order_acquire);
			previtem = atomptr_p(prevval);

			if (!previtem || (cmpval = cmpfn(previtem, item)) > 0)
				break;
			if (cmpval == 0)
				return previtem;

			prev = &previtem->next;
		} while (1);

		/* successor at the insert point is being deleted -
		 * retry from the start of the list
		 */
		if (atomptr_l(prevval))
			continue;

		item->next = prevval;
		if (atomic_compare_exchange_strong_explicit(prev, &prevval, i,
				memory_order_release, memory_order_relaxed))
			break;
	} while (1);

	atomic_fetch_add_explicit(&h->count, 1, memory_order_relaxed);
	return NULL;
}
236 | | |
237 | | static void atomsort_del_core(struct atomsort_head *h, |
238 | | struct atomsort_item *item, _Atomic atomptr_t *hint, |
239 | | atomptr_t next) |
240 | 0 | { |
241 | 0 | _Atomic atomptr_t *prev = hint ? hint : &h->first, *upd; |
242 | 0 | atomptr_t prevval, updval; |
243 | 0 | struct atomsort_item *prevptr; |
244 | |
|
245 | 0 | atomic_fetch_sub_explicit(&h->count, 1, memory_order_relaxed); |
246 | | |
247 | | /* the following code should be identical (except sort<>list) to |
248 | | * atomlist_del_core() |
249 | | */ |
250 | 0 | while (1) { |
251 | 0 | upd = NULL; |
252 | 0 | updval = ATOMPTR_LOCK; |
253 | |
|
254 | 0 | do { |
255 | 0 | prevval = atomic_load_explicit(prev, |
256 | 0 | memory_order_consume); |
257 | | |
258 | | /* track the beginning of a chain of deleted items |
259 | | * this is necessary to make this lock-free; we can |
260 | | * complete deletions started by other threads. |
261 | | */ |
262 | 0 | if (!atomptr_l(prevval)) { |
263 | 0 | updval = prevval; |
264 | 0 | upd = prev; |
265 | 0 | } |
266 | |
|
267 | 0 | prevptr = atomsort_itemp(prevval); |
268 | 0 | if (prevptr == item) |
269 | 0 | break; |
270 | | |
271 | 0 | prev = &prevptr->next; |
272 | 0 | } while (prevptr); |
273 | |
|
274 | 0 | if (prevptr != item) |
275 | | /* another thread completed our deletion */ |
276 | 0 | return; |
277 | | |
278 | 0 | if (!upd || atomptr_l(updval)) { |
279 | | /* failed to find non-deleted predecessor... |
280 | | * have to try again |
281 | | */ |
282 | 0 | prev = &h->first; |
283 | 0 | continue; |
284 | 0 | } |
285 | | |
286 | 0 | if (!atomic_compare_exchange_strong_explicit(upd, &updval, |
287 | 0 | next, memory_order_relaxed, |
288 | 0 | memory_order_relaxed)) { |
289 | | /* prev doesn't point to item anymore, something |
290 | | * was inserted. continue at same position forward. |
291 | | */ |
292 | 0 | continue; |
293 | 0 | } |
294 | 0 | break; |
295 | 0 | } |
296 | 0 | } |
297 | | |
/* remove "item" from sorted list "h".
 *
 * "hint" optionally points at a predecessor's ->next field to speed up
 * the unlink search (may be NULL).  Deleting the same item concurrently
 * from two threads is a caller bug and trips the assert.
 */
void atomsort_del_hint(struct atomsort_head *h, struct atomsort_item *item,
		_Atomic atomptr_t *hint)
{
	atomptr_t next;

	/* mark ourselves in-delete - full barrier */
	next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
			memory_order_seq_cst);
	assert(!atomptr_l(next));	/* delete race on same item */

	atomsort_del_core(h, item, hint, next);
}
310 | | |
/* remove and return the first item of "h", or NULL if the list is empty.
 * delete-marks the head item first, then unlinks it with
 * atomsort_del_core().
 */
struct atomsort_item *atomsort_pop(struct atomsort_head *h)
{
	struct atomsort_item *item;
	atomptr_t next;

	/* grab head of the list.  No matter what, the head of the list is
	 * where we start deleting because either it's our item, or it's
	 * some delete-marked items and then our item.
	 */
	next = atomic_load_explicit(&h->first, memory_order_consume);

	do {
		item = atomsort_itemp(next);
		if (!item)
			return NULL;

		/* try to mark deletion */
		next = atomic_fetch_or_explicit(&item->next, ATOMPTR_LOCK,
				memory_order_acquire);

	} while (atomptr_l(next));
	/* if loop is taken: delete race on same item (another pop or del)
	 * => proceed to next item
	 * if loop exited here: we have our item selected and marked
	 */
	atomsort_del_core(h, item, &h->first, next);
	return item;
}