/rust/registry/src/index.crates.io-1949cf8c6b5b557f/flurry-0.5.2/src/node.rs
Line | Count | Source |
1 | | use crate::raw::Table; |
2 | | use crate::reclaim::{Atomic, Collector, Guard, RetireShared, Shared}; |
3 | | use core::sync::atomic::{AtomicBool, AtomicI64, Ordering}; |
4 | | use parking_lot::Mutex; |
5 | | use seize::{Link, Linked}; |
6 | | use std::borrow::Borrow; |
7 | | use std::thread::{current, park, Thread}; |
8 | | |
9 | | /// Entry in a bin. |
10 | | /// |
11 | | /// Will _generally_ be `Node`. Any entry that is not first in the bin will be a `Node`. |
12 | | #[derive(Debug)] |
13 | | pub(crate) enum BinEntry<K, V> { |
14 | | Node(Node<K, V>), |
15 | | Tree(TreeBin<K, V>), |
16 | | TreeNode(TreeNode<K, V>), |
17 | | Moved, |
18 | | } |
19 | | |
20 | | unsafe impl<K, V> Send for BinEntry<K, V> |
21 | | where |
22 | | K: Send, |
23 | | V: Send, |
24 | | Node<K, V>: Send, |
25 | | Table<K, V>: Send, |
26 | | { |
27 | | } |
28 | | |
29 | | unsafe impl<K, V> Sync for BinEntry<K, V> |
30 | | where |
31 | | K: Sync, |
32 | | V: Sync, |
33 | | Node<K, V>: Sync, |
34 | | Table<K, V>: Sync, |
35 | | { |
36 | | } |
37 | | |
38 | | impl<K, V> BinEntry<K, V> { |
39 | 0 | pub(crate) fn as_node(&self) -> Option<&Node<K, V>> { |
40 | 0 | if let BinEntry::Node(ref n) = *self { |
41 | 0 | Some(n) |
42 | | } else { |
43 | 0 | None |
44 | | } |
45 | 0 | } |
46 | | |
47 | 0 | pub(crate) fn as_tree_node(&self) -> Option<&TreeNode<K, V>> { |
48 | 0 | if let BinEntry::TreeNode(ref n) = *self { |
49 | 0 | Some(n) |
50 | | } else { |
51 | 0 | None |
52 | | } |
53 | 0 | } |
54 | 0 | pub(crate) fn as_tree_bin(&self) -> Option<&TreeBin<K, V>> { |
55 | 0 | if let BinEntry::Tree(ref bin) = *self { |
56 | 0 | Some(bin) |
57 | | } else { |
58 | 0 | None |
59 | | } |
60 | 0 | } |
61 | | } |
62 | | |
63 | | /// Key-value entry. |
64 | | #[derive(Debug)] |
65 | | pub(crate) struct Node<K, V> { |
66 | | pub(crate) hash: u64, |
67 | | pub(crate) key: K, |
68 | | pub(crate) value: Atomic<V>, |
69 | | pub(crate) next: Atomic<BinEntry<K, V>>, |
70 | | pub(crate) lock: Mutex<()>, |
71 | | } |
72 | | |
73 | | impl<K, V> Node<K, V> { |
74 | 0 | pub(crate) fn new<AV>(hash: u64, key: K, value: AV) -> Self |
75 | 0 | where |
76 | 0 | AV: Into<Atomic<V>>, |
77 | | { |
78 | 0 | Node::with_next(hash, key, value, Atomic::null()) |
79 | 0 | } |
80 | 0 | pub(crate) fn with_next<AV>(hash: u64, key: K, value: AV, next: Atomic<BinEntry<K, V>>) -> Self |
81 | 0 | where |
82 | 0 | AV: Into<Atomic<V>>, |
83 | | { |
84 | 0 | Node { |
85 | 0 | hash, |
86 | 0 | key, |
87 | 0 | value: value.into(), |
88 | 0 | next, |
89 | 0 | lock: Mutex::new(()), |
90 | 0 | } |
91 | 0 | } |
92 | | } |
93 | | |
94 | | /* ------------------------ TreeNodes ------------------------ */ |
95 | | |
96 | | /// Nodes for use in TreeBins. |
97 | | #[derive(Debug)] |
98 | | pub(crate) struct TreeNode<K, V> { |
99 | | // Node properties |
100 | | pub(crate) node: Node<K, V>, |
101 | | |
102 | | // red-black tree links |
103 | | pub(crate) parent: Atomic<BinEntry<K, V>>, |
104 | | pub(crate) left: Atomic<BinEntry<K, V>>, |
105 | | pub(crate) right: Atomic<BinEntry<K, V>>, |
106 | | pub(crate) prev: Atomic<BinEntry<K, V>>, // needed to unlink `next` upon deletion |
107 | | pub(crate) red: AtomicBool, |
108 | | } |
109 | | |
110 | | impl<K, V> TreeNode<K, V> { |
111 | | /// Constructs a new TreeNode with the given attributes to be inserted into a TreeBin. |
112 | | /// |
113 | | /// This does not yet arrange this node and its `next` nodes into a tree, since the tree |
114 | | /// structure is maintained globally by the TreeBin. |
115 | 0 | pub(crate) fn new( |
116 | 0 | hash: u64, |
117 | 0 | key: K, |
118 | 0 | value: Atomic<V>, |
119 | 0 | next: Atomic<BinEntry<K, V>>, |
120 | 0 | parent: Atomic<BinEntry<K, V>>, |
121 | 0 | ) -> Self { |
122 | 0 | TreeNode { |
123 | 0 | node: Node::with_next(hash, key, value, next), |
124 | 0 | parent, |
125 | 0 | left: Atomic::null(), |
126 | 0 | right: Atomic::null(), |
127 | 0 | prev: Atomic::null(), |
128 | 0 | red: AtomicBool::new(false), |
129 | 0 | } |
130 | 0 | } |
131 | | |
132 | | /// Returns the `TreeNode` (or `Shared::null()` if not found) for the given |
133 | | /// key, starting at the given node. |
134 | 0 | pub(crate) fn find_tree_node<'g, Q>( |
135 | 0 | from: Shared<'g, BinEntry<K, V>>, |
136 | 0 | hash: u64, |
137 | 0 | key: &Q, |
138 | 0 | guard: &'g Guard<'_>, |
139 | 0 | ) -> Shared<'g, BinEntry<K, V>> |
140 | 0 | where |
141 | 0 | K: Borrow<Q>, |
142 | 0 | Q: ?Sized + Ord, |
143 | | { |
144 | | // NOTE: in the Java code, this method is implemented on the `TreeNode` |
145 | | // instance directly, as they don't need to worry about shared pointers. |
146 | | // The Java code then uses a do-while loop here, as `self`/`this` always |
147 | | // exists so the condition below will always be satisfied on the first |
148 | | // iteration. We however _do_ have shared pointers and _do not_ have |
149 | | // do-while loops, so we end up with one extra check, and we need a |
150 | | // few `continue`s because the local assignments are hoisted out of |
151 | | // the loop conditions. |
152 | 0 | let mut p = from; |
153 | 0 | while !p.is_null() { |
154 | | // safety: the containing TreeBin of all TreeNodes was read under our |
155 | | // guard, at which point the tree structure was valid. Since our guard |
156 | | // marks the current thread as active, the TreeNodes remain valid for |
157 | | // at least as long as we hold onto the guard. |
158 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
159 | 0 | let p_deref = unsafe { Self::get_tree_node(p) }; |
160 | 0 | let p_hash = p_deref.node.hash; |
161 | | |
162 | | // first attempt to follow the tree order with the given hash |
163 | 0 | match p_hash.cmp(&hash) { |
164 | | std::cmp::Ordering::Greater => { |
165 | 0 | p = p_deref.left.load(Ordering::SeqCst, guard); |
166 | 0 | continue; |
167 | | } |
168 | | std::cmp::Ordering::Less => { |
169 | 0 | p = p_deref.right.load(Ordering::SeqCst, guard); |
170 | 0 | continue; |
171 | | } |
172 | 0 | _ => {} |
173 | | } |
174 | | |
175 | | // if the hash matches, check if the given key also matches. If so, |
176 | | // we have found the target node. |
177 | 0 | let p_key = &p_deref.node.key; |
178 | 0 | if p_key.borrow() == key { |
179 | 0 | return p; |
180 | 0 | } |
181 | | |
182 | | // If the key does not match, we need to descend down the tree. |
183 | 0 | let p_left = p_deref.left.load(Ordering::SeqCst, guard); |
184 | 0 | let p_right = p_deref.right.load(Ordering::SeqCst, guard); |
185 | | // If one of the children is empty, there is only one child to check. |
186 | 0 | if p_left.is_null() { |
187 | 0 | p = p_right; |
188 | 0 | continue; |
189 | 0 | } else if p_right.is_null() { |
190 | 0 | p = p_left; |
191 | 0 | continue; |
192 | 0 | } |
193 | | |
194 | | // Otherwise, we compare keys to find the next child to look at. |
195 | 0 | p = match p_key.borrow().cmp(key) { |
196 | 0 | std::cmp::Ordering::Greater => p_left, |
197 | 0 | std::cmp::Ordering::Less => p_right, |
198 | | std::cmp::Ordering::Equal => { |
199 | 0 | unreachable!("Ord and Eq have to match and Eq is checked above") |
200 | | } |
201 | | }; |
202 | | // NOTE: the Java code has some additional cases here in case the keys |
203 | | // _are not_ equal (p_key != key and !key.equals(p_key)), but |
204 | | // _compare_ equal (k.compareTo(p_key) == 0). In this case, both |
205 | | // children are searched. Since `Eq` and `Ord` must match, these |
206 | | // cases cannot occur here. |
207 | | } |
208 | | |
209 | 0 | Shared::null() |
210 | 0 | } |
211 | | } |
212 | | |
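For intuition, `find_tree_node` is an ordinary binary-search-tree lookup over the composite ordering (hash, key). A minimal single-threaded sketch with a hypothetical owned `Bst` type (not flurry's; the real code above additionally handles the null-child cases inherited from the Java version):

    struct Bst<K> {
        hash: u64,
        key: K,
        left: Option<Box<Bst<K>>>,
        right: Option<Box<Bst<K>>>,
    }

    // Descend by hash first, then by key. Because `Eq` must agree with
    // `Ord`, an `Equal` comparison *is* a match; no two-child search.
    fn find<'a, K: Ord>(mut p: Option<&'a Bst<K>>, hash: u64, key: &K) -> Option<&'a Bst<K>> {
        while let Some(n) = p {
            p = match n.hash.cmp(&hash).then_with(|| n.key.cmp(key)) {
                std::cmp::Ordering::Greater => n.left.as_deref(),
                std::cmp::Ordering::Less => n.right.as_deref(),
                std::cmp::Ordering::Equal => return Some(n),
            };
        }
        None
    }

    fn main() {
        let leaf = Bst { hash: 2, key: "b", left: None, right: None };
        let root = Bst { hash: 1, key: "a", left: None, right: Some(Box::new(leaf)) };
        assert!(find(Some(&root), 2, &"b").is_some());
        assert!(find(Some(&root), 3, &"c").is_none());
    }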
213 | | const WRITER: i64 = 1; // set while holding write lock |
214 | | const WAITER: i64 = 2; // set when waiting for write lock |
215 | | const READER: i64 = 4; // increment value for setting read lock |
216 | | |
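A hypothetical demo of this encoding (plain integers, no atomics, not flurry API): bit 0 flags an active writer, bit 1 a waiting writer, and the remaining bits count readers in steps of `READER`.

    const WRITER: i64 = 1; // bit 0
    const WAITER: i64 = 2; // bit 1
    const READER: i64 = 4; // bits 2.. count the active readers

    fn main() {
        let state = 3 * READER + WAITER; // three readers, one waiting writer
        assert_eq!(state & (WRITER | WAITER), WAITER); // flag bits
        assert_eq!(state >> 2, 3); // reader count
    }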
217 | | /// Private representation for movement direction along tree successors. |
218 | | enum Dir { |
219 | | Left, |
220 | | Right, |
221 | | } |
222 | | |
223 | | /// TreeNodes used at the heads of bins. TreeBins do not hold user keys or |
224 | | /// values, but instead point to a list of TreeNodes and their root. They also |
225 | | /// maintain a parasitic read-write lock forcing writers (who hold the bin lock) |
226 | | /// to wait for readers (who do not) to complete before tree restructuring |
227 | | /// operations. |
228 | | #[derive(Debug)] |
229 | | pub(crate) struct TreeBin<K, V> { |
230 | | pub(crate) root: Atomic<BinEntry<K, V>>, |
231 | | pub(crate) first: Atomic<BinEntry<K, V>>, |
232 | | pub(crate) waiter: Atomic<Thread>, |
233 | | pub(crate) lock: parking_lot::Mutex<()>, |
234 | | pub(crate) lock_state: AtomicI64, |
235 | | } |
236 | | |
237 | | impl<K, V> TreeBin<K, V> |
238 | | where |
239 | | K: Ord, |
240 | | { |
241 | | /// Constructs a new bin from the given nodes. |
242 | | /// |
243 | | /// Nodes are arranged into an ordered red-black tree. |
244 | | /// |
245 | | /// # Safety |
246 | | /// |
247 | | /// The `bin` pointer and its successors must have been created with `Shared::boxed` and never shared. |
248 | 0 | pub(crate) unsafe fn new(bin: Shared<'_, BinEntry<K, V>>, guard: &Guard<'_>) -> Self { |
249 | 0 | let mut root = Shared::null(); |
250 | | |
251 | | // safety: We own the nodes for creating this new TreeBin, so they are |
252 | | // not shared with another thread and cannot get invalidated. |
253 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
254 | 0 | let mut x = bin; |
255 | 0 | while !x.is_null() { |
256 | 0 | let x_deref = unsafe { TreeNode::get_tree_node(x) }; |
257 | 0 | let next = x_deref.node.next.load(Ordering::Relaxed, guard); |
258 | 0 | x_deref.left.store(Shared::null(), Ordering::Relaxed); |
259 | 0 | x_deref.right.store(Shared::null(), Ordering::Relaxed); |
260 | | |
261 | | // if there is no root yet, make x the root |
262 | 0 | if root.is_null() { |
263 | 0 | x_deref.parent.store(Shared::null(), Ordering::Relaxed); |
264 | 0 | x_deref.red.store(false, Ordering::Relaxed); |
265 | 0 | root = x; |
266 | 0 | x = next; |
267 | 0 | continue; |
268 | 0 | } |
269 | | |
270 | 0 | let key = &x_deref.node.key; |
271 | 0 | let hash = x_deref.node.hash; |
272 | | |
273 | | // Traverse the tree that was constructed so far from the root to |
274 | | // find out where to insert x |
275 | 0 | let mut p = root; |
276 | | loop { |
277 | 0 | let p_deref = unsafe { TreeNode::get_tree_node(p) }; |
278 | 0 | let p_key = &p_deref.node.key; |
279 | 0 | let p_hash = p_deref.node.hash; |
280 | | |
281 | | // Select successor of p in the correct direction. We will continue |
282 | | // to descend the tree through this successor. |
283 | 0 | let xp = p; |
284 | | let dir; |
285 | 0 | p = match p_hash.cmp(&hash).then(p_key.cmp(key)) { |
286 | | std::cmp::Ordering::Greater => { |
287 | 0 | dir = Dir::Left; |
288 | 0 | &p_deref.left |
289 | | } |
290 | | std::cmp::Ordering::Less => { |
291 | 0 | dir = Dir::Right; |
292 | 0 | &p_deref.right |
293 | | } |
294 | 0 | std::cmp::Ordering::Equal => unreachable!("one key references two nodes"), |
295 | | } |
296 | 0 | .load(Ordering::Relaxed, guard); |
297 | | |
298 | 0 | if p.is_null() { |
299 | 0 | x_deref.parent.store(xp, Ordering::Relaxed); |
300 | 0 | match dir { |
301 | 0 | Dir::Left => { |
302 | 0 | unsafe { TreeNode::get_tree_node(xp) } |
303 | 0 | .left |
304 | 0 | .store(x, Ordering::Relaxed); |
305 | 0 | } |
306 | 0 | Dir::Right => { |
307 | 0 | unsafe { TreeNode::get_tree_node(xp) } |
308 | 0 | .right |
309 | 0 | .store(x, Ordering::Relaxed); |
310 | 0 | } |
311 | | } |
312 | 0 | root = TreeNode::balance_insertion(root, x, guard); |
313 | 0 | break; |
314 | 0 | } |
315 | | } |
316 | | |
317 | 0 | x = next; |
318 | | } |
319 | | |
320 | 0 | if cfg!(debug_assertions) { |
321 | 0 | TreeNode::check_invariants(root, guard); |
322 | 0 | } |
323 | 0 | TreeBin { |
324 | 0 | root: Atomic::from(root), |
325 | 0 | first: Atomic::from(bin), |
326 | 0 | waiter: Atomic::null(), |
327 | 0 | lock: parking_lot::Mutex::new(()), |
328 | 0 | lock_state: AtomicI64::new(0), |
329 | 0 | } |
330 | 0 | } |
331 | | } |
332 | | |
333 | | impl<K, V> TreeBin<K, V> { |
334 | | /// Acquires write lock for tree restructuring. |
335 | 0 | fn lock_root(&self, guard: &Guard<'_>, collector: &Collector) { |
336 | 0 | if self |
337 | 0 | .lock_state |
338 | 0 | .compare_exchange(0, WRITER, Ordering::SeqCst, Ordering::Relaxed) |
339 | 0 | .is_err() |
340 | 0 | { |
341 | 0 | // the current lock state is non-zero, which means the lock is contended |
342 | 0 | self.contended_lock(guard, collector); |
343 | 0 | } |
344 | 0 | } |
345 | | |
346 | | /// Releases write lock for tree restructuring. |
347 | 0 | fn unlock_root(&self) { |
348 | 0 | self.lock_state.store(0, Ordering::Release); |
349 | 0 | } |
350 | | |
351 | | /// Possibly blocks awaiting root lock. |
352 | 0 | fn contended_lock(&self, guard: &Guard<'_>, collector: &Collector) { |
353 | 0 | let mut waiting = false; |
354 | | let mut state: i64; |
355 | | loop { |
356 | 0 | state = self.lock_state.load(Ordering::Acquire); |
357 | 0 | if state & !WAITER == 0 { |
358 | | // there are no writing or reading threads |
359 | 0 | if self |
360 | 0 | .lock_state |
361 | 0 | .compare_exchange(state, WRITER, Ordering::SeqCst, Ordering::Relaxed) |
362 | 0 | .is_ok() |
363 | | { |
364 | | // we won the race for the lock and get to return from blocking |
365 | 0 | if waiting { |
366 | 0 | let waiter = self.waiter.swap(Shared::null(), Ordering::SeqCst, guard); |
367 | 0 | // safety: we are the only thread that modifies the |
368 | 0 | // `waiter` thread handle (reading threads only use it |
369 | 0 | // to notify us). Thus, having stored a valid value |
370 | 0 | // below, `waiter` is a valid pointer. |
371 | 0 | // |
372 | 0 | // The reading thread that notifies us does so as its |
373 | 0 | // last action in `find` and then lets go of the |
374 | 0 | // reference immediately. _New_ reading threads already |
375 | 0 | // take the slow path since `WAITER` is set, so they do |
376 | 0 | // not obtain new references to our thread handle. Also, |
377 | 0 | // we just swapped out that handle, so it is no longer |
378 | 0 | // reachable. |
379 | 0 | // |
380 | 0 | // We cannot safely drop the waiter immediately, because we may not have |
381 | 0 | // parked after storing our thread handle in `waiter`. This can happen if |
382 | 0 | // we noticed that there were no readers immediately after setting us as |
383 | 0 | // the waiter, and then went directly into this branch. In that case, some |
384 | 0 | // other thread may simultaneously have noticed that we wanted to be woken |
385 | 0 | // up, and be trying to call `.unpark`. So, we `retire_shared` instead. |
386 | 0 | unsafe { guard.retire_shared(waiter) }; |
387 | 0 | } |
388 | 0 | return; |
389 | 0 | } |
390 | 0 | } else if state & WAITER == 0 { |
391 | | // we have not indicated yet that we are waiting, so we need to |
392 | | // do that now |
393 | 0 | if self |
394 | 0 | .lock_state |
395 | 0 | .compare_exchange(state, state | WAITER, Ordering::SeqCst, Ordering::Relaxed) |
396 | 0 | .is_ok() |
397 | | { |
398 | 0 | waiting = true; |
399 | 0 | let current_thread = Shared::boxed(current(), collector); |
400 | 0 | let waiter = self.waiter.swap(current_thread, Ordering::SeqCst, guard); |
401 | 0 | assert!(waiter.is_null()); |
402 | 0 | } |
403 | 0 | } else if waiting { |
404 | 0 | park(); |
405 | 0 | } |
406 | 0 | std::hint::spin_loop(); |
407 | | } |
408 | 0 | } |
409 | | |
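The waiter handshake in `contended_lock` is the standard publish-handle / re-check / `park` pattern; `unpark` grants a token, so a `park` that races with the wake-up returns immediately. A minimal standalone sketch, with a plain `Mutex<Option<Thread>>` standing in for the bin's `waiter` field (assumed setup, not flurry's types):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread::{self, Thread};

    fn main() {
        let waiter: Arc<Mutex<Option<Thread>>> = Arc::new(Mutex::new(None));
        let done = Arc::new(AtomicBool::new(false));

        let (w, d) = (Arc::clone(&waiter), Arc::clone(&done));
        let reader = thread::spawn(move || {
            // "last reader": finish the read, then wake a waiting writer
            d.store(true, Ordering::Release);
            if let Some(t) = w.lock().unwrap().take() {
                t.unpark();
            }
        });

        // "writer": publish our handle, then re-check before parking
        *waiter.lock().unwrap() = Some(thread::current());
        while !done.load(Ordering::Acquire) {
            thread::park(); // spurious wakeups simply re-check the flag
        }
        reader.join().unwrap();
    }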
410 | | /// Returns matching node or `Shared::null()` if none. Tries to search using |
411 | | /// tree comparisons from root, but continues linear search when lock not |
412 | | /// available. |
413 | 0 | pub(crate) fn find<'g, Q>( |
414 | 0 | bin: Shared<'g, BinEntry<K, V>>, |
415 | 0 | hash: u64, |
416 | 0 | key: &Q, |
417 | 0 | guard: &'g Guard<'_>, |
418 | 0 | ) -> Shared<'g, BinEntry<K, V>> |
419 | 0 | where |
420 | 0 | K: Borrow<Q>, |
421 | 0 | Q: ?Sized + Ord, |
422 | | { |
423 | | // safety: bin is a valid pointer. |
424 | | // |
425 | | // there are three cases when a bin pointer is invalidated: |
426 | | // |
427 | | // 1. if the table was resized, bin is a move entry, and the resize has completed. in |
428 | | // that case, the table (and all its heads) will be retired. |
429 | | // 2. if the table is being resized, bin may be swapped with a move entry. the old bin |
430 | | // will be retired. |
431 | | // 3. when elements are inserted into or removed from the map, bin may be changed into |
432 | | // or from a TreeBin from or into a regular, linear bin. the old bin will also be |
433 | | // retired. |
434 | | // |
435 | | // in all cases, we held the guard when we got the reference to the bin. if any such |
436 | | // swap happened, it must have happened _after_ we read. since we did the read while |
437 | | // the current thread was marked as active, we must be included in the reference count, |
438 | | // and the drop must happen _after_ we decrement the count (i.e drop our guard). |
439 | 0 | let bin_deref = unsafe { bin.deref() }.as_tree_bin().unwrap(); |
440 | 0 | let mut element = bin_deref.first.load(Ordering::SeqCst, guard); |
441 | 0 | while !element.is_null() { |
442 | 0 | let s = bin_deref.lock_state.load(Ordering::SeqCst); |
443 | 0 | if s & (WAITER | WRITER) != 0 { |
444 | | // another thread is modifying or wants to modify the tree |
445 | | // (write). As long as that's the case, we follow the `next` |
446 | | // pointers of the `TreeNode` linearly, as we cannot trust the |
447 | | // tree's structure. |
448 | | // |
449 | | // safety: we read under our guard, at which point the tree |
450 | | // structure was valid. Since our guard marks the current thread |
451 | | // as active, the TreeNodes remain valid for at least as long as |
452 | | // we hold onto the guard. |
453 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
454 | 0 | let element_deref = unsafe { TreeNode::get_tree_node(element) }; |
455 | 0 | let element_key = &element_deref.node.key; |
456 | 0 | if element_deref.node.hash == hash && element_key.borrow() == key { |
457 | 0 | return element; |
458 | 0 | } |
459 | 0 | element = element_deref.node.next.load(Ordering::SeqCst, guard); |
460 | 0 | } else if bin_deref |
461 | 0 | .lock_state |
462 | 0 | .compare_exchange(s, s + READER, Ordering::SeqCst, Ordering::Relaxed) |
463 | 0 | .is_ok() |
464 | | { |
465 | | // the current lock state indicates no waiter or writer and we |
466 | | // acquired a read lock |
467 | 0 | let root = bin_deref.root.load(Ordering::SeqCst, guard); |
468 | 0 | let p = if root.is_null() { |
469 | 0 | Shared::null() |
470 | | } else { |
471 | 0 | TreeNode::find_tree_node(root, hash, key, guard) |
472 | | }; |
473 | 0 | if bin_deref.lock_state.fetch_add(-READER, Ordering::SeqCst) == (READER | WAITER) { |
474 | | // we were the last reader holding up a waiting writer, so |
475 | | // we unpark the waiting writer by granting it a token |
476 | 0 | let waiter = &bin_deref.waiter.load(Ordering::SeqCst, guard); |
477 | 0 | if !waiter.is_null() { |
478 | 0 | // safety: thread handles are only dropped by the thread |
479 | 0 | // they represent _after_ it acquires the write lock. |
480 | 0 | // Since the thread behind the `waiter` handle is |
481 | 0 | // currently _waiting_ on said lock, the handle will not |
482 | 0 | // yet be dropped. |
483 | 0 | unsafe { waiter.deref() }.unpark(); |
484 | 0 | } |
485 | 0 | } |
486 | 0 | return p; |
487 | 0 | } |
488 | | } |
489 | | |
490 | 0 | Shared::null() |
491 | 0 | } |
492 | | |
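In isolation, the reader protocol of `find` is: CAS `s -> s + READER` only while neither `WRITER` nor `WAITER` is set, otherwise fall back to the linear scan; on release, the value returned by `fetch_add(-READER)` tells the last reader whether it must wake a waiting writer. A hypothetical single-threaded sketch of just that arithmetic:

    use std::sync::atomic::{AtomicI64, Ordering};

    const WRITER: i64 = 1;
    const WAITER: i64 = 2;
    const READER: i64 = 4;

    // Take a read "slot" if no writer holds or awaits the lock; `false`
    // means the caller should walk the `next` pointers instead.
    fn try_read_lock(state: &AtomicI64) -> bool {
        let s = state.load(Ordering::SeqCst);
        s & (WRITER | WAITER) == 0
            && state
                .compare_exchange(s, s + READER, Ordering::SeqCst, Ordering::Relaxed)
                .is_ok()
    }

    fn main() {
        let state = AtomicI64::new(0);
        assert!(try_read_lock(&state)); // uncontended: tree search is allowed

        state.fetch_or(WAITER, Ordering::SeqCst); // a writer announces itself
        assert!(!try_read_lock(&state)); // readers now degrade to linear scans

        // release: prev == READER | WAITER marks us as the last reader
        // holding up a waiter, i.e. the one who must `unpark` it
        let prev = state.fetch_add(-READER, Ordering::SeqCst);
        assert_eq!(prev, READER | WAITER);
    }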
493 | | /// Unlinks the given node, which must be present before this call. |
494 | | /// |
495 | | /// This is messier than typical red-black deletion code because we cannot |
496 | | /// swap the contents of an interior node with a leaf successor that is |
497 | | /// pinned by `next` pointers that are accessible independently of the bin |
498 | | /// lock. So instead we swap the tree links. |
499 | | /// |
500 | | /// Returns `true` if the bin is now too small and should be untreeified. |
501 | | /// |
502 | | /// # Safety |
503 | | /// The given node is only marked for garbage collection if the bin remains |
504 | | /// large enough to be a `TreeBin`. If this method returns `true`, indicating |
505 | | /// that the bin should be untreeified, the given node is only unlinked from |
506 | | /// linear traversal, but not from the tree. This makes the node unreachable |
507 | | /// through linear reads and excludes it from being dropped when the bin is |
508 | | /// dropped. However, reading threads may still obtain a reference to until |
509 | | /// the bin is swapped out for a linear bin. |
510 | | /// |
511 | | /// The caller of this method _must_ ensure that the given node is properly |
512 | | /// marked for garbage collection _after_ this bin has been swapped out. If |
513 | | /// the value of the given node was supposed to get dropped as well |
514 | | /// (`drop_value` was true), the caller must do the same for the value. |
515 | 0 | pub(crate) unsafe fn remove_tree_node<'g>( |
516 | 0 | &'g self, |
517 | 0 | p: Shared<'g, BinEntry<K, V>>, |
518 | 0 | drop_value: bool, |
519 | 0 | guard: &'g Guard<'_>, |
520 | 0 | collector: &Collector, |
521 | 0 | ) -> bool { |
522 | | // safety: we read under our guard, at which point the tree |
523 | | // structure was valid. Since our guard marks the current thread as active, |
524 | | // the TreeNodes remain valid for at least as long as we hold onto the |
525 | | // guard. Additionally, this method assumes `p` to be non-null. |
526 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
527 | 0 | let p_deref = TreeNode::get_tree_node(p); |
528 | 0 | let next = p_deref.node.next.load(Ordering::SeqCst, guard); |
529 | 0 | let prev = p_deref.prev.load(Ordering::SeqCst, guard); |
530 | | |
531 | | // unlink traversal pointers |
532 | 0 | if prev.is_null() { |
533 | 0 | // the node to delete is the first node |
534 | 0 | self.first.store(next, Ordering::SeqCst); |
535 | 0 | } else { |
536 | 0 | TreeNode::get_tree_node(prev) |
537 | 0 | .node |
538 | 0 | .next |
539 | 0 | .store(next, Ordering::SeqCst); |
540 | 0 | } |
541 | 0 | if !next.is_null() { |
542 | 0 | TreeNode::get_tree_node(next) |
543 | 0 | .prev |
544 | 0 | .store(prev, Ordering::SeqCst); |
545 | 0 | } |
546 | | |
547 | 0 | if self.first.load(Ordering::SeqCst, guard).is_null() { |
548 | | // since the bin was not empty previously (it contained p), |
549 | | // `self.first` is `null` only if we just stored `null` via `next`. |
550 | | // In that case, we have removed the last node from this bin and |
551 | | // don't have a tree anymore, so we reset `self.root`. |
552 | 0 | self.root.store(Shared::null(), Ordering::SeqCst); |
553 | 0 | return true; |
554 | 0 | } |
555 | | |
556 | | // if we are now too small to be a `TreeBin`, we don't need to worry |
557 | | // about restructuring the tree since the bin will be untreeified |
558 | | // anyway, so we check that |
559 | 0 | let mut root = self.root.load(Ordering::SeqCst, guard); |
560 | | // TODO: Can `root` even be `null`? |
561 | | // The Java code has `NULL` checks for this, but in theory it should not be possible to |
562 | | // encounter a tree that has no root when we have its lock. It should always have at |
563 | | // least `UNTREEIFY_THRESHOLD` nodes. If it is indeed impossible we should replace |
564 | | // this with an assertion instead. |
565 | 0 | if root.is_null() |
566 | 0 | || TreeNode::get_tree_node(root) |
567 | 0 | .right |
568 | 0 | .load(Ordering::SeqCst, guard) |
569 | 0 | .is_null() |
570 | | { |
571 | 0 | return true; |
572 | | } else { |
573 | 0 | let root_left = TreeNode::get_tree_node(root) |
574 | 0 | .left |
575 | 0 | .load(Ordering::SeqCst, guard); |
576 | 0 | if root_left.is_null() |
577 | 0 | || TreeNode::get_tree_node(root_left) |
578 | 0 | .left |
579 | 0 | .load(Ordering::SeqCst, guard) |
580 | 0 | .is_null() |
581 | | { |
582 | 0 | return true; |
583 | 0 | } |
584 | | } |
585 | | |
586 | | // if we get here, we know that we will still be a tree and have |
587 | | // unlinked the `next` and `prev` pointers, so it's time to restructure |
588 | | // the tree |
589 | 0 | self.lock_root(guard, collector); |
590 | | // NOTE: since we have the write lock for the tree, we know that all |
591 | | // readers will read along the linear `next` pointers until we release |
592 | | // the lock (these pointers were adjusted above to exclude the removed |
593 | | // node and are synchronized as `SeqCst`). This means that we can |
594 | | // operate on the _other_ pointers of tree nodes that represent the tree |
595 | | // structure using a `Relaxed` ordering. The release of the write lock |
596 | | // will then synchronize with later readers who will see the new values. |
597 | | let replacement; |
598 | 0 | let p_left = p_deref.left.load(Ordering::Relaxed, guard); |
599 | 0 | let p_right = p_deref.right.load(Ordering::Relaxed, guard); |
600 | 0 | if !p_left.is_null() && !p_right.is_null() { |
601 | | // find the smallest successor of `p` |
602 | 0 | let mut successor = p_right; |
603 | 0 | let mut successor_deref = TreeNode::get_tree_node(successor); |
604 | 0 | let mut successor_left = successor_deref.left.load(Ordering::Relaxed, guard); |
605 | 0 | while !successor_left.is_null() { |
606 | 0 | successor = successor_left; |
607 | 0 | successor_deref = TreeNode::get_tree_node(successor); |
608 | 0 | successor_left = successor_deref.left.load(Ordering::Relaxed, guard); |
609 | 0 | } |
610 | | // swap colors |
611 | 0 | let color = successor_deref.red.load(Ordering::Relaxed); |
612 | 0 | successor_deref |
613 | 0 | .red |
614 | 0 | .store(p_deref.red.load(Ordering::Relaxed), Ordering::Relaxed); |
615 | 0 | p_deref.red.store(color, Ordering::Relaxed); |
616 | | |
617 | 0 | let successor_right = successor_deref.right.load(Ordering::Relaxed, guard); |
618 | 0 | let p_parent = p_deref.parent.load(Ordering::Relaxed, guard); |
619 | 0 | if successor == p_right { |
620 | 0 | // `p` was the direct parent of the smallest successor. |
621 | 0 | // the two nodes will be swapped |
622 | 0 | p_deref.parent.store(successor, Ordering::Relaxed); |
623 | 0 | successor_deref.right.store(p, Ordering::Relaxed); |
624 | 0 | } else { |
625 | 0 | let successor_parent = successor_deref.parent.load(Ordering::Relaxed, guard); |
626 | 0 | p_deref.parent.store(successor_parent, Ordering::Relaxed); |
627 | 0 | if !successor_parent.is_null() { |
628 | 0 | if successor |
629 | 0 | == TreeNode::get_tree_node(successor_parent) |
630 | 0 | .left |
631 | 0 | .load(Ordering::Relaxed, guard) |
632 | 0 | { |
633 | 0 | TreeNode::get_tree_node(successor_parent) |
634 | 0 | .left |
635 | 0 | .store(p, Ordering::Relaxed); |
636 | 0 | } else { |
637 | 0 | TreeNode::get_tree_node(successor_parent) |
638 | 0 | .right |
639 | 0 | .store(p, Ordering::Relaxed); |
640 | 0 | } |
641 | 0 | } |
642 | 0 | successor_deref.right.store(p_right, Ordering::Relaxed); |
643 | 0 | if !p_right.is_null() { |
644 | 0 | TreeNode::get_tree_node(p_right) |
645 | 0 | .parent |
646 | 0 | .store(successor, Ordering::Relaxed); |
647 | 0 | } |
648 | | } |
649 | 0 | debug_assert!(successor_left.is_null()); |
650 | 0 | p_deref.left.store(Shared::null(), Ordering::Relaxed); |
651 | 0 | p_deref.right.store(successor_right, Ordering::Relaxed); |
652 | 0 | if !successor_right.is_null() { |
653 | 0 | TreeNode::get_tree_node(successor_right) |
654 | 0 | .parent |
655 | 0 | .store(p, Ordering::Relaxed); |
656 | 0 | } |
657 | 0 | successor_deref.left.store(p_left, Ordering::Relaxed); |
658 | 0 | if !p_left.is_null() { |
659 | 0 | TreeNode::get_tree_node(p_left) |
660 | 0 | .parent |
661 | 0 | .store(successor, Ordering::Relaxed); |
662 | 0 | } |
663 | 0 | successor_deref.parent.store(p_parent, Ordering::Relaxed); |
664 | 0 | if p_parent.is_null() { |
665 | 0 | // the successor was swapped to the root as `p` was previously the root |
666 | 0 | root = successor; |
667 | 0 | } else if p |
668 | 0 | == TreeNode::get_tree_node(p_parent) |
669 | 0 | .left |
670 | 0 | .load(Ordering::Relaxed, guard) |
671 | 0 | { |
672 | 0 | TreeNode::get_tree_node(p_parent) |
673 | 0 | .left |
674 | 0 | .store(successor, Ordering::Relaxed); |
675 | 0 | } else { |
676 | 0 | TreeNode::get_tree_node(p_parent) |
677 | 0 | .right |
678 | 0 | .store(successor, Ordering::Relaxed); |
679 | 0 | } |
680 | | |
681 | | // We have swapped `p` with `successor`, which is the next element |
682 | | // after `p` in `(hash, key)` order (the smallest element larger |
683 | | // than `p`). To actually remove `p`, we need to check if |
684 | | // `successor` has a right child (it cannot have a left child, as |
685 | | // otherwise _it_ would be the `successor`). If not, we can just |
686 | | // directly unlink `p`, since it is now a leaf. Otherwise, we have |
687 | | // to replace it with the right child of `successor` (which is now |
688 | | // also its right child), which preserves the ordering. |
689 | 0 | if !successor_right.is_null() { |
690 | 0 | replacement = successor_right; |
691 | 0 | } else { |
692 | 0 | replacement = p; |
693 | 0 | } |
694 | 0 | } else if !p_left.is_null() { |
695 | 0 | // If `p` only has a left child, just replacing `p` with that child preserves the ordering. |
696 | 0 | replacement = p_left; |
697 | 0 | } else if !p_right.is_null() { |
698 | 0 | // Symmetrically, we can use its right child. |
699 | 0 | replacement = p_right; |
700 | 0 | } else { |
701 | 0 | // If `p` is _already_ a leaf, we can also just unlink it. |
702 | 0 | replacement = p; |
703 | 0 | } |
704 | | |
705 | 0 | if replacement != p { |
706 | | // `p` (at its potentially new position) has a child, so we need to do a replacement. |
707 | 0 | let p_parent = p_deref.parent.load(Ordering::Relaxed, guard); |
708 | 0 | TreeNode::get_tree_node(replacement) |
709 | 0 | .parent |
710 | 0 | .store(p_parent, Ordering::Relaxed); |
711 | 0 | if p_parent.is_null() { |
712 | 0 | root = replacement; |
713 | 0 | } else { |
714 | 0 | let p_parent_deref = TreeNode::get_tree_node(p_parent); |
715 | 0 | if p == p_parent_deref.left.load(Ordering::Relaxed, guard) { |
716 | 0 | p_parent_deref.left.store(replacement, Ordering::Relaxed); |
717 | 0 | } else { |
718 | 0 | p_parent_deref.right.store(replacement, Ordering::Relaxed); |
719 | 0 | } |
720 | | } |
721 | 0 | p_deref.parent.store(Shared::null(), Ordering::Relaxed); |
722 | 0 | p_deref.right.store(Shared::null(), Ordering::Relaxed); |
723 | 0 | p_deref.left.store(Shared::null(), Ordering::Relaxed); |
724 | 0 | } |
725 | | |
726 | 0 | self.root.store( |
727 | 0 | if p_deref.red.load(Ordering::Relaxed) { |
728 | 0 | root |
729 | | } else { |
730 | 0 | TreeNode::balance_deletion(root, replacement, guard) |
731 | | }, |
732 | 0 | Ordering::Relaxed, |
733 | | ); |
734 | | |
735 | 0 | if p == replacement { |
736 | | // `p` does _not_ have children, so we unlink it as a leaf. |
737 | 0 | let p_parent = p_deref.parent.load(Ordering::Relaxed, guard); |
738 | 0 | if !p_parent.is_null() { |
739 | 0 | let p_parent_deref = TreeNode::get_tree_node(p_parent); |
740 | 0 | if p == p_parent_deref.left.load(Ordering::Relaxed, guard) { |
741 | 0 | TreeNode::get_tree_node(p_parent) |
742 | 0 | .left |
743 | 0 | .store(Shared::null(), Ordering::Relaxed); |
744 | 0 | } else if p == p_parent_deref.right.load(Ordering::Relaxed, guard) { |
745 | 0 | p_parent_deref |
746 | 0 | .right |
747 | 0 | .store(Shared::null(), Ordering::Relaxed); |
748 | 0 | } |
749 | 0 | p_deref.parent.store(Shared::null(), Ordering::Relaxed); |
750 | 0 | debug_assert!(p_deref.left.load(Ordering::Relaxed, guard).is_null()); |
751 | 0 | debug_assert!(p_deref.right.load(Ordering::Relaxed, guard).is_null()); |
752 | 0 | } |
753 | 0 | } |
754 | 0 | self.unlock_root(); |
755 | | |
756 | | // mark the old node and its value for garbage collection |
757 | | // safety: we just completely unlinked `p` from both linear and tree |
758 | | // traversal, making it and its value unreachable for any future thread. |
759 | | // Any existing references to one of them were obtained under a guard |
760 | | // included in the reference count, and thus have to be released before |
761 | | // `p` is actually dropped. |
762 | | #[allow(unused_unsafe)] |
763 | | unsafe { |
764 | 0 | if drop_value { |
765 | 0 | guard.retire_shared(p_deref.node.value.load(Ordering::Relaxed, guard)); |
766 | 0 | } |
767 | 0 | guard.retire_shared(p); |
768 | | } |
769 | | |
770 | 0 | if cfg!(debug_assertions) { |
771 | 0 | TreeNode::check_invariants(self.root.load(Ordering::SeqCst, guard), guard); |
772 | 0 | } |
773 | 0 | false |
774 | 0 | } |
775 | | } |
776 | | |
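For contrast with the link-swapping above: the textbook deletion of a node with two children moves the in-order successor's contents into the doomed node, which is exactly what the `next`-pointer readers forbid here. A sketch of that textbook move on a hypothetical owned, single-threaded BST:

    struct Bst {
        key: u64,
        left: Option<Box<Bst>>,
        right: Option<Box<Bst>>,
    }

    // Remove and return the smallest key in the subtree rooted at `node`.
    fn pop_min(node: &mut Option<Box<Bst>>) -> u64 {
        if node.as_mut().unwrap().left.is_some() {
            pop_min(&mut node.as_mut().unwrap().left)
        } else {
            let boxed = node.take().unwrap();
            *node = boxed.right;
            boxed.key
        }
    }

    fn main() {
        // key 2 has two children: "delete" it by overwriting its key with
        // its in-order successor (3) and unlinking that leaf instead.
        let mut root = Some(Box::new(Bst {
            key: 2,
            left: Some(Box::new(Bst { key: 1, left: None, right: None })),
            right: Some(Box::new(Bst {
                key: 4,
                left: Some(Box::new(Bst { key: 3, left: None, right: None })),
                right: None,
            })),
        }));
        let successor = pop_min(&mut root.as_mut().unwrap().right);
        root.as_mut().unwrap().key = successor;
        assert_eq!(root.as_ref().unwrap().key, 3);
    }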
777 | | impl<K, V> TreeBin<K, V> |
778 | | where |
779 | | K: Ord + Send + Sync, |
780 | | { |
781 | | /// Finds or adds a node to the tree. |
782 | | /// If a node for the given key already exists, it is returned. Otherwise, |
783 | | /// returns `Shared::null()`. |
784 | 0 | pub(crate) fn find_or_put_tree_val<'g>( |
785 | 0 | &'g self, |
786 | 0 | hash: u64, |
787 | 0 | key: K, |
788 | 0 | value: Shared<'g, V>, |
789 | 0 | guard: &'g Guard<'_>, |
790 | 0 | collector: &Collector, |
791 | 0 | ) -> Shared<'g, BinEntry<K, V>> { |
792 | 0 | let mut p = self.root.load(Ordering::SeqCst, guard); |
793 | 0 | if p.is_null() { |
794 | | // the current root is `null`, i.e. the tree is currently empty. |
795 | | // Thus, we simply insert the new entry as the root. |
796 | 0 | let tree_node = Shared::boxed( |
797 | 0 | BinEntry::TreeNode(TreeNode::new( |
798 | 0 | hash, |
799 | 0 | key, |
800 | 0 | Atomic::from(value), |
801 | 0 | Atomic::null(), |
802 | 0 | Atomic::null(), |
803 | 0 | )), |
804 | 0 | collector, |
805 | | ); |
806 | 0 | self.root.store(tree_node, Ordering::Release); |
807 | 0 | self.first.store(tree_node, Ordering::Release); |
808 | 0 | return Shared::null(); |
809 | 0 | } |
810 | | // safety: we read under our guard, at which point the tree |
811 | | // structure was valid. Since our guard marks the current thread as active, |
812 | | // the TreeNodes remain valid for at least as long as we hold onto the |
813 | | // guard. |
814 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
815 | | loop { |
816 | 0 | let p_deref = unsafe { TreeNode::get_tree_node(p) }; |
817 | 0 | let p_hash = p_deref.node.hash; |
818 | 0 | let xp = p; |
819 | | let dir; |
820 | 0 | p = match p_hash.cmp(&hash) { |
821 | | std::cmp::Ordering::Greater => { |
822 | 0 | dir = Dir::Left; |
823 | 0 | &p_deref.left |
824 | | } |
825 | | std::cmp::Ordering::Less => { |
826 | 0 | dir = Dir::Right; |
827 | 0 | &p_deref.right |
828 | | } |
829 | | std::cmp::Ordering::Equal => { |
830 | 0 | let p_key = &p_deref.node.key; |
831 | 0 | if *p_key == key { |
832 | | // a node with the given key already exists, so we return it |
833 | 0 | return p; |
834 | 0 | } |
835 | 0 | match p_key.cmp(&key) { |
836 | | std::cmp::Ordering::Greater => { |
837 | 0 | dir = Dir::Left; |
838 | 0 | &p_deref.left |
839 | | } |
840 | | std::cmp::Ordering::Less => { |
841 | 0 | dir = Dir::Right; |
842 | 0 | &p_deref.right |
843 | | } |
844 | | std::cmp::Ordering::Equal => { |
845 | 0 | unreachable!("Ord and Eq have to match and Eq is checked above") |
846 | | } |
847 | | } |
848 | | // NOTE: the Java code has some additional cases here in case the |
849 | | // keys _are not_ equal (p_key != key and !key.equals(p_key)), |
850 | | // but _compare_ equal (k.compareTo(p_key) == 0). In this case, |
851 | | // both children are searched and if a matching node exists it |
852 | | // is returned. Since `Eq` and `Ord` must match, these cases |
853 | | // cannot occur here. |
854 | | } |
855 | | } |
856 | 0 | .load(Ordering::SeqCst, guard); |
857 | | |
858 | 0 | if p.is_null() { |
859 | | // we have reached a tree leaf, so the given key is not yet |
860 | | // contained in the tree and we can add it at the correct |
861 | | // position (which is here, since we arrived here by comparing |
862 | | // hash and key of the new entry) |
863 | 0 | let first = self.first.load(Ordering::SeqCst, guard); |
864 | 0 | let x = Shared::boxed( |
865 | 0 | BinEntry::TreeNode(TreeNode::new( |
866 | 0 | hash, |
867 | 0 | key, |
868 | 0 | Atomic::from(value), |
869 | 0 | Atomic::from(first), |
870 | 0 | Atomic::from(xp), |
871 | 0 | )), |
872 | 0 | collector, |
873 | | ); |
874 | 0 | self.first.store(x, Ordering::SeqCst); |
875 | 0 | if !first.is_null() { |
876 | 0 | unsafe { TreeNode::get_tree_node(first) } |
877 | 0 | .prev |
878 | 0 | .store(x, Ordering::SeqCst); |
879 | 0 | } |
880 | 0 | match dir { |
881 | 0 | Dir::Left => { |
882 | 0 | unsafe { TreeNode::get_tree_node(xp) } |
883 | 0 | .left |
884 | 0 | .store(x, Ordering::SeqCst); |
885 | 0 | } |
886 | 0 | Dir::Right => { |
887 | 0 | unsafe { TreeNode::get_tree_node(xp) } |
888 | 0 | .right |
889 | 0 | .store(x, Ordering::SeqCst); |
890 | 0 | } |
891 | | } |
892 | | |
893 | 0 | if !unsafe { TreeNode::get_tree_node(xp) } |
894 | 0 | .red |
895 | 0 | .load(Ordering::SeqCst) |
896 | 0 | { |
897 | 0 | unsafe { TreeNode::get_tree_node(x) } |
898 | 0 | .red |
899 | 0 | .store(true, Ordering::SeqCst); |
900 | 0 | } else { |
901 | 0 | self.lock_root(guard, collector); |
902 | 0 | self.root.store( |
903 | 0 | TreeNode::balance_insertion( |
904 | 0 | self.root.load(Ordering::Relaxed, guard), |
905 | 0 | x, |
906 | 0 | guard, |
907 | 0 | ), |
908 | 0 | Ordering::Relaxed, |
909 | 0 | ); |
910 | 0 | self.unlock_root(); |
911 | 0 | } |
912 | 0 | break; |
913 | 0 | } |
914 | | } |
915 | | |
916 | 0 | if cfg!(debug_assertions) { |
917 | 0 | TreeNode::check_invariants(self.root.load(Ordering::SeqCst, guard), guard); |
918 | 0 | } |
919 | 0 | Shared::null() |
920 | 0 | } |
921 | | } |
922 | | |
923 | | impl<K, V> Drop for TreeBin<K, V> { |
924 | 0 | fn drop(&mut self) { |
925 | | // safety: we have &mut self _and_ all references we have returned are bound to the |
926 | | // lifetime of their borrow of self, so there cannot be any outstanding references to |
927 | | // anything in the map. |
928 | 0 | unsafe { self.drop_fields(true) }; |
929 | 0 | } |
930 | | } |
931 | | |
932 | | impl<K, V> TreeBin<K, V> { |
933 | | /// Defers dropping the given tree bin without its nodes' values. |
934 | | /// |
935 | | /// # Safety |
936 | | /// The given bin must be a valid, non-null BinEntry::TreeBin and the caller must ensure |
937 | | /// that no references to the bin can be obtained by other threads after the call to this |
938 | | /// method. |
939 | 0 | pub(crate) unsafe fn defer_drop_without_values<'g>( |
940 | 0 | bin: Shared<'g, BinEntry<K, V>>, |
941 | 0 | guard: &'g Guard<'_>, |
942 | 0 | ) { |
943 | 0 | guard.defer_retire(bin.as_ptr(), |link| { |
944 | 0 | let bin = unsafe { |
945 | | // SAFETY: `bin` is a `Linked<BinEntry<K, V>>` |
946 | 0 | let ptr: *mut Linked<BinEntry<K, V>> = Link::cast(link); |
947 | | // SAFETY: `retire` guarantees that we |
948 | | // have unique access to `bin` at this point |
949 | 0 | Box::from_raw(ptr).value |
950 | | }; |
951 | | |
952 | 0 | let BinEntry::Tree(mut tree_bin) = bin else { |
953 | 0 | unreachable!("bin is a tree bin"); |
954 | | }; |
955 | 0 | tree_bin.drop_fields(false); |
956 | 0 | }); |
957 | 0 | } |
958 | | |
959 | | /// Drops the given tree bin, but only drops its nodes' values when specified. |
960 | | /// |
961 | | /// # Safety |
962 | | /// The pointer to the tree bin must be valid and the caller must be the single owner |
963 | | /// of the tree bin. If the nodes' values are to be dropped, there must be no outstanding |
964 | | /// references to these values in other threads and it must be impossible to obtain them. |
965 | 0 | pub(crate) unsafe fn drop_fields(&mut self, drop_values: bool) { |
966 | | // assume ownership of atomically shared references. note that it is |
967 | | // sufficient to follow the `next` pointers of the `first` element in |
968 | | // the bin, since the tree pointers point to the same nodes. |
969 | | |
970 | | // swap out first pointer so nodes will not get dropped again when |
971 | | // `tree_bin` is dropped |
972 | 0 | let guard = Guard::unprotected(); |
973 | 0 | let p = self.first.swap(Shared::null(), Ordering::Relaxed, &guard); |
974 | 0 | Self::drop_tree_nodes(p, drop_values, &guard); |
975 | 0 | } |
976 | | |
977 | | /// Drops the given list of tree nodes, but only drops their values when specified. |
978 | | /// |
979 | | /// # Safety |
980 | | /// The pointers to the tree nodes must be valid and the caller must be the single owner |
981 | | /// of the tree nodes. If the nodes' values are to be dropped, there must be no outstanding |
982 | | /// references to these values in other threads and it must be impossible to obtain them. |
983 | 0 | pub(crate) unsafe fn drop_tree_nodes<'g>( |
984 | 0 | from: Shared<'g, BinEntry<K, V>>, |
985 | 0 | drop_values: bool, |
986 | 0 | guard: &'g Guard<'_>, |
987 | 0 | ) { |
988 | 0 | let mut p = from; |
989 | 0 | while !p.is_null() { |
990 | 0 | let BinEntry::TreeNode(tree_node) = p.into_box().value else { |
991 | 0 | unreachable!("Trees can only ever contain TreeNodes"); |
992 | | }; |
993 | | // if specified, drop the value in this node |
994 | 0 | if drop_values { |
995 | 0 | let _ = tree_node.node.value.into_box(); |
996 | 0 | } |
997 | | // then we move to the next node |
998 | 0 | p = tree_node.node.next.load(Ordering::SeqCst, guard); |
999 | | } |
1000 | 0 | } |
1001 | | } |
1002 | | |
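Draining the bin through the flat `next` chain (instead of recursing through `left`/`right`) is the usual iterative-drop pattern; it frees every node exactly once and keeps destruction at O(1) stack depth. A minimal sketch on a hypothetical owned list (not flurry's types):

    struct ListNode {
        _value: u64,
        next: Option<Box<ListNode>>,
    }

    impl Drop for ListNode {
        fn drop(&mut self) {
            // detach each successor before it is dropped, so no recursion
            let mut p = self.next.take();
            while let Some(mut node) = p {
                p = node.next.take();
            }
        }
    }

    fn main() {
        let mut head = None;
        for v in 0..100_000u64 {
            head = Some(Box::new(ListNode { _value: v, next: head }));
        }
        drop(head); // the default recursive drop could overflow the stack
    }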
1003 | | /* Helper impls to avoid code explosion */ |
1004 | | impl<K, V> TreeNode<K, V> { |
1005 | | /// Gets the `BinEntry::TreeNode(tree_node)` behind the given pointer and |
1006 | | /// returns its `tree_node`. |
1007 | | /// |
1008 | | /// # Safety |
1009 | | /// All safety concerns of [`deref`](Shared::deref) apply. In particular, the |
1010 | | /// supplied pointer must be non-null and must point to valid memory. |
1011 | | /// Additionally, it must point to an instance of BinEntry that is actually a |
1012 | | /// TreeNode. |
1013 | | #[inline] |
1014 | 0 | pub(crate) unsafe fn get_tree_node(bin: Shared<'_, BinEntry<K, V>>) -> &'_ TreeNode<K, V> { |
1015 | 0 | bin.deref().as_tree_node().unwrap() |
1016 | 0 | } |
1017 | | } |
1018 | | |
1019 | | /* ----------------------------------------------------------------- */ |
1020 | | |
1021 | | macro_rules! treenode { |
1022 | | ($pointer:ident) => { |
1023 | | unsafe { Self::get_tree_node($pointer) } |
1024 | | }; |
1025 | | } |
1026 | | // Red-black tree methods, all adapted from CLR |
1027 | | impl<K, V> TreeNode<K, V> { |
1028 | | // NOTE: these functions can be executed only when creating a new TreeBin or |
1029 | | // while holding the `write_lock`. Thus, we can use `Relaxed` memory |
1030 | | // operations everywhere. |
1031 | 0 | fn rotate_left<'g>( |
1032 | 0 | mut root: Shared<'g, BinEntry<K, V>>, |
1033 | 0 | p: Shared<'g, BinEntry<K, V>>, |
1034 | 0 | guard: &'g Guard<'_>, |
1035 | 0 | ) -> Shared<'g, BinEntry<K, V>> { |
1036 | 0 | if p.is_null() { |
1037 | 0 | return root; |
1038 | 0 | } |
1039 | | // safety: the containing TreeBin of all TreeNodes was read under our |
1040 | | // guard, at which point the tree structure was valid. Since our guard |
1041 | | // marks the current thread as active, the TreeNodes remain valid for |
1042 | | // at least as long as we hold onto the guard. |
1043 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
1044 | 0 | let p_deref = treenode!(p); |
1045 | 0 | let right = p_deref.right.load(Ordering::Relaxed, guard); |
1046 | 0 | if right.is_null() { |
1047 | | // there is no right successor to rotate left |
1048 | 0 | return root; |
1049 | 0 | } |
1050 | 0 | let right_deref = treenode!(right); |
1051 | 0 | let right_left = right_deref.left.load(Ordering::Relaxed, guard); |
1052 | 0 | p_deref.right.store(right_left, Ordering::Relaxed); |
1053 | 0 | if !right_left.is_null() { |
1054 | 0 | treenode!(right_left).parent.store(p, Ordering::Relaxed); |
1055 | 0 | } |
1056 | | |
1057 | 0 | let p_parent = p_deref.parent.load(Ordering::Relaxed, guard); |
1058 | 0 | right_deref.parent.store(p_parent, Ordering::Relaxed); |
1059 | 0 | if p_parent.is_null() { |
1060 | 0 | root = right; |
1061 | 0 | right_deref.red.store(false, Ordering::Relaxed); |
1062 | 0 | } else { |
1063 | 0 | let p_parent_deref = treenode!(p_parent); |
1064 | 0 | if p_parent_deref.left.load(Ordering::Relaxed, guard) == p { |
1065 | 0 | p_parent_deref.left.store(right, Ordering::Relaxed); |
1066 | 0 | } else { |
1067 | 0 | p_parent_deref.right.store(right, Ordering::Relaxed); |
1068 | 0 | } |
1069 | | } |
1070 | 0 | right_deref.left.store(p, Ordering::Relaxed); |
1071 | 0 | p_deref.parent.store(right, Ordering::Relaxed); |
1072 | | |
1073 | 0 | root |
1074 | 0 | } |
1075 | | |
1076 | 0 | fn rotate_right<'g>( |
1077 | 0 | mut root: Shared<'g, BinEntry<K, V>>, |
1078 | 0 | p: Shared<'g, BinEntry<K, V>>, |
1079 | 0 | guard: &'g Guard<'_>, |
1080 | 0 | ) -> Shared<'g, BinEntry<K, V>> { |
1081 | 0 | if p.is_null() { |
1082 | 0 | return root; |
1083 | 0 | } |
1084 | | // safety: the containing TreeBin of all TreeNodes was read under our |
1085 | | // guard, at which point the tree structure was valid. Since our guard |
1086 | | // marks the current thread as active, the TreeNodes remain valid for |
1087 | | // at least as long as we hold onto the guard. |
1088 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
1089 | 0 | let p_deref = treenode!(p); |
1090 | 0 | let left = p_deref.left.load(Ordering::Relaxed, guard); |
1091 | 0 | if left.is_null() { |
1092 | | // there is no left successor to rotate right |
1093 | 0 | return root; |
1094 | 0 | } |
1095 | 0 | let left_deref = treenode!(left); |
1096 | 0 | let left_right = left_deref.right.load(Ordering::Relaxed, guard); |
1097 | 0 | p_deref.left.store(left_right, Ordering::Relaxed); |
1098 | 0 | if !left_right.is_null() { |
1099 | 0 | treenode!(left_right).parent.store(p, Ordering::Relaxed); |
1100 | 0 | } |
1101 | | |
1102 | 0 | let p_parent = p_deref.parent.load(Ordering::Relaxed, guard); |
1103 | 0 | left_deref.parent.store(p_parent, Ordering::Relaxed); |
1104 | 0 | if p_parent.is_null() { |
1105 | 0 | root = left; |
1106 | 0 | left_deref.red.store(false, Ordering::Relaxed); |
1107 | 0 | } else { |
1108 | 0 | let p_parent_deref = treenode!(p_parent); |
1109 | 0 | if p_parent_deref.right.load(Ordering::Relaxed, guard) == p { |
1110 | 0 | p_parent_deref.right.store(left, Ordering::Relaxed); |
1111 | 0 | } else { |
1112 | 0 | p_parent_deref.left.store(left, Ordering::Relaxed); |
1113 | 0 | } |
1114 | | } |
1115 | 0 | left_deref.right.store(p, Ordering::Relaxed); |
1116 | 0 | p_deref.parent.store(left, Ordering::Relaxed); |
1117 | | |
1118 | 0 | root |
1119 | 0 | } |
1120 | | |
1121 | 0 | fn balance_insertion<'g>( |
1122 | 0 | mut root: Shared<'g, BinEntry<K, V>>, |
1123 | 0 | mut x: Shared<'g, BinEntry<K, V>>, |
1124 | 0 | guard: &'g Guard<'_>, |
1125 | 0 | ) -> Shared<'g, BinEntry<K, V>> { |
1126 | | // safety: the containing TreeBin of all TreeNodes was read under our |
1127 | | // guard, at which point the tree structure was valid. Since our guard |
1128 | | // marks the current thread as active, the TreeNodes remain valid for |
1129 | | // at least as long as we hold onto the guard. |
1130 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
1131 | 0 | treenode!(x).red.store(true, Ordering::Relaxed); |
1132 | | |
1133 | | let mut x_parent: Shared<'_, BinEntry<K, V>>; |
1134 | | let mut x_parent_parent: Shared<'_, BinEntry<K, V>>; |
1135 | | let mut x_parent_parent_left: Shared<'_, BinEntry<K, V>>; |
1136 | | let mut x_parent_parent_right: Shared<'_, BinEntry<K, V>>; |
1137 | | loop { |
1138 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1139 | 0 | if x_parent.is_null() { |
1140 | 0 | treenode!(x).red.store(false, Ordering::Relaxed); |
1141 | 0 | return x; |
1142 | 0 | } |
1143 | 0 | x_parent_parent = treenode!(x_parent).parent.load(Ordering::Relaxed, guard); |
1144 | 0 | if !treenode!(x_parent).red.load(Ordering::Relaxed) || x_parent_parent.is_null() { |
1145 | 0 | return root; |
1146 | 0 | } |
1147 | 0 | x_parent_parent_left = treenode!(x_parent_parent) |
1148 | 0 | .left |
1149 | 0 | .load(Ordering::Relaxed, guard); |
1150 | 0 | if x_parent == x_parent_parent_left { |
1151 | 0 | x_parent_parent_right = treenode!(x_parent_parent) |
1152 | 0 | .right |
1153 | 0 | .load(Ordering::Relaxed, guard); |
1154 | 0 | if !x_parent_parent_right.is_null() |
1155 | 0 | && treenode!(x_parent_parent_right).red.load(Ordering::Relaxed) |
1156 | 0 | { |
1157 | 0 | treenode!(x_parent_parent_right) |
1158 | 0 | .red |
1159 | 0 | .store(false, Ordering::Relaxed); |
1160 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1161 | 0 | treenode!(x_parent_parent) |
1162 | 0 | .red |
1163 | 0 | .store(true, Ordering::Relaxed); |
1164 | 0 | x = x_parent_parent; |
1165 | 0 | } else { |
1166 | 0 | if x == treenode!(x_parent).right.load(Ordering::Relaxed, guard) { |
1167 | 0 | x = x_parent; |
1168 | 0 | root = Self::rotate_left(root, x, guard); |
1169 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1170 | 0 | x_parent_parent = if x_parent.is_null() { |
1171 | 0 | Shared::null() |
1172 | | } else { |
1173 | 0 | treenode!(x_parent).parent.load(Ordering::Relaxed, guard) |
1174 | | }; |
1175 | 0 | } |
1176 | 0 | if !x_parent.is_null() { |
1177 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1178 | 0 | if !x_parent_parent.is_null() { |
1179 | 0 | treenode!(x_parent_parent) |
1180 | 0 | .red |
1181 | 0 | .store(true, Ordering::Relaxed); |
1182 | 0 | root = Self::rotate_right(root, x_parent_parent, guard); |
1183 | 0 | } |
1184 | 0 | } |
1185 | | } |
1186 | 0 | } else if !x_parent_parent_left.is_null() |
1187 | 0 | && treenode!(x_parent_parent_left).red.load(Ordering::Relaxed) |
1188 | 0 | { |
1189 | 0 | treenode!(x_parent_parent_left) |
1190 | 0 | .red |
1191 | 0 | .store(false, Ordering::Relaxed); |
1192 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1193 | 0 | treenode!(x_parent_parent) |
1194 | 0 | .red |
1195 | 0 | .store(true, Ordering::Relaxed); |
1196 | 0 | x = x_parent_parent; |
1197 | 0 | } else { |
1198 | 0 | if x == treenode!(x_parent).left.load(Ordering::Relaxed, guard) { |
1199 | 0 | x = x_parent; |
1200 | 0 | root = Self::rotate_right(root, x, guard); |
1201 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1202 | 0 | x_parent_parent = if x_parent.is_null() { |
1203 | 0 | Shared::null() |
1204 | | } else { |
1205 | 0 | treenode!(x_parent).parent.load(Ordering::Relaxed, guard) |
1206 | | }; |
1207 | 0 | } |
1208 | 0 | if !x_parent.is_null() { |
1209 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1210 | 0 | if !x_parent_parent.is_null() { |
1211 | 0 | treenode!(x_parent_parent) |
1212 | 0 | .red |
1213 | 0 | .store(true, Ordering::Relaxed); |
1214 | 0 | root = Self::rotate_left(root, x_parent_parent, guard); |
1215 | 0 | } |
1216 | 0 | } |
1217 | | } |
1218 | | } |
1219 | 0 | } |
1220 | | |
1221 | 0 | fn balance_deletion<'g>( |
1222 | 0 | mut root: Shared<'g, BinEntry<K, V>>, |
1223 | 0 | mut x: Shared<'g, BinEntry<K, V>>, |
1224 | 0 | guard: &'g Guard<'_>, |
1225 | 0 | ) -> Shared<'g, BinEntry<K, V>> { |
1226 | | let mut x_parent: Shared<'_, BinEntry<K, V>>; |
1227 | | let mut x_parent_left: Shared<'_, BinEntry<K, V>>; |
1228 | | let mut x_parent_right: Shared<'_, BinEntry<K, V>>; |
1229 | | // safety: the containing TreeBin of all TreeNodes was read under our |
1230 | | // guard, at which point the tree structure was valid. Since our guard |
1231 | | // marks the current thread as active, the TreeNodes remain valid for at |
1232 | | // least as long as we hold onto the guard. |
1233 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
1234 | | loop { |
1235 | 0 | if x.is_null() || x == root { |
1236 | 0 | return root; |
1237 | 0 | } |
1238 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1239 | 0 | if x_parent.is_null() { |
1240 | 0 | treenode!(x).red.store(false, Ordering::Relaxed); |
1241 | 0 | return x; |
1242 | 0 | } else if treenode!(x).red.load(Ordering::Relaxed) { |
1243 | 0 | treenode!(x).red.store(false, Ordering::Relaxed); |
1244 | 0 | return root; |
1245 | 0 | } |
1246 | 0 | x_parent_left = treenode!(x_parent).left.load(Ordering::Relaxed, guard); |
1247 | 0 | if x_parent_left == x { |
1248 | 0 | x_parent_right = treenode!(x_parent).right.load(Ordering::Relaxed, guard); |
1249 | 0 | if !x_parent_right.is_null() |
1250 | 0 | && treenode!(x_parent_right).red.load(Ordering::Relaxed) |
1251 | | { |
1252 | 0 | treenode!(x_parent_right) |
1253 | 0 | .red |
1254 | 0 | .store(false, Ordering::Relaxed); |
1255 | 0 | treenode!(x_parent).red.store(true, Ordering::Relaxed); |
1256 | 0 | root = Self::rotate_left(root, x_parent, guard); |
1257 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1258 | 0 | x_parent_right = if x_parent.is_null() { |
1259 | 0 | Shared::null() |
1260 | | } else { |
1261 | 0 | treenode!(x_parent).right.load(Ordering::Relaxed, guard) |
1262 | | }; |
1263 | 0 | } |
1264 | 0 | if x_parent_right.is_null() { |
1265 | 0 | x = x_parent; |
1266 | 0 | continue; |
1267 | 0 | } |
1268 | 0 | let s_left = treenode!(x_parent_right) |
1269 | 0 | .left |
1270 | 0 | .load(Ordering::Relaxed, guard); |
1271 | 0 | let mut s_right = treenode!(x_parent_right) |
1272 | 0 | .right |
1273 | 0 | .load(Ordering::Relaxed, guard); |
1274 | | |
1275 | 0 | if (s_right.is_null() || !treenode!(s_right).red.load(Ordering::Relaxed)) |
1276 | 0 | && (s_left.is_null() || !treenode!(s_left).red.load(Ordering::Relaxed)) |
1277 | | { |
1278 | 0 | treenode!(x_parent_right).red.store(true, Ordering::Relaxed); |
1279 | 0 | x = x_parent; |
1280 | 0 | continue; |
1281 | 0 | } |
1282 | 0 | if s_right.is_null() || !treenode!(s_right).red.load(Ordering::Relaxed) { |
1283 | 0 | if !s_left.is_null() { |
1284 | 0 | treenode!(s_left).red.store(false, Ordering::Relaxed); |
1285 | 0 | } |
1286 | 0 | treenode!(x_parent_right).red.store(true, Ordering::Relaxed); |
1287 | 0 | root = Self::rotate_right(root, x_parent_right, guard); |
1288 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1289 | 0 | x_parent_right = if x_parent.is_null() { |
1290 | 0 | Shared::null() |
1291 | | } else { |
1292 | 0 | treenode!(x_parent).right.load(Ordering::Relaxed, guard) |
1293 | | }; |
1294 | 0 | } |
1295 | 0 | if !x_parent_right.is_null() { |
1296 | 0 | treenode!(x_parent_right).red.store( |
1297 | 0 | if x_parent.is_null() { |
1298 | 0 | false |
1299 | | } else { |
1300 | 0 | treenode!(x_parent).red.load(Ordering::Relaxed) |
1301 | | }, |
1302 | 0 | Ordering::Relaxed, |
1303 | | ); |
1304 | 0 | s_right = treenode!(x_parent_right) |
1305 | 0 | .right |
1306 | 0 | .load(Ordering::Relaxed, guard); |
1307 | 0 | if !s_right.is_null() { |
1308 | 0 | treenode!(s_right).red.store(false, Ordering::Relaxed); |
1309 | 0 | } |
1310 | 0 | } |
1311 | 0 | if !x_parent.is_null() { |
1312 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1313 | 0 | root = Self::rotate_left(root, x_parent, guard); |
1314 | 0 | } |
1315 | | } else { |
1316 | |                 // symmetric: the mirror image of the branch above, with left and right exchanged |
1317 | 0 | if !x_parent_left.is_null() && treenode!(x_parent_left).red.load(Ordering::Relaxed) |
1318 | | { |
1319 | 0 | treenode!(x_parent_left).red.store(false, Ordering::Relaxed); |
1320 | 0 | treenode!(x_parent).red.store(true, Ordering::Relaxed); |
1321 | 0 | root = Self::rotate_right(root, x_parent, guard); |
1322 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1323 | 0 | x_parent_left = if x_parent.is_null() { |
1324 | 0 | Shared::null() |
1325 | | } else { |
1326 | 0 | treenode!(x_parent).left.load(Ordering::Relaxed, guard) |
1327 | | }; |
1328 | 0 | } |
1329 | 0 | if x_parent_left.is_null() { |
1330 | 0 | x = x_parent; |
1331 | 0 | continue; |
1332 | 0 | } |
1333 | 0 | let mut s_left = treenode!(x_parent_left).left.load(Ordering::Relaxed, guard); |
1334 | 0 | let s_right = treenode!(x_parent_left) |
1335 | 0 | .right |
1336 | 0 | .load(Ordering::Relaxed, guard); |
1337 | | |
1338 | 0 | if (s_left.is_null() || !treenode!(s_left).red.load(Ordering::Relaxed)) |
1339 | 0 | && (s_right.is_null() || !treenode!(s_right).red.load(Ordering::Relaxed)) |
1340 | | { |
1341 | 0 | treenode!(x_parent_left).red.store(true, Ordering::Relaxed); |
1342 | 0 | x = x_parent; |
1343 | 0 | continue; |
1344 | 0 | } |
1345 | 0 | if s_left.is_null() || !treenode!(s_left).red.load(Ordering::Relaxed) { |
1346 | 0 | if !s_right.is_null() { |
1347 | 0 | treenode!(s_right).red.store(false, Ordering::Relaxed); |
1348 | 0 | } |
1349 | 0 | treenode!(x_parent_left).red.store(true, Ordering::Relaxed); |
1350 | 0 | root = Self::rotate_left(root, x_parent_left, guard); |
1351 | 0 | x_parent = treenode!(x).parent.load(Ordering::Relaxed, guard); |
1352 | 0 | x_parent_left = if x_parent.is_null() { |
1353 | 0 | Shared::null() |
1354 | | } else { |
1355 | 0 | treenode!(x_parent).left.load(Ordering::Relaxed, guard) |
1356 | | }; |
1357 | 0 | } |
1358 | 0 | if !x_parent_left.is_null() { |
1359 | 0 | treenode!(x_parent_left).red.store( |
1360 | 0 | if x_parent.is_null() { |
1361 | 0 | false |
1362 | | } else { |
1363 | 0 | treenode!(x_parent).red.load(Ordering::Relaxed) |
1364 | | }, |
1365 | 0 | Ordering::Relaxed, |
1366 | | ); |
1367 | 0 | s_left = treenode!(x_parent_left).left.load(Ordering::Relaxed, guard); |
1368 | 0 | if !s_left.is_null() { |
1369 | 0 | treenode!(s_left).red.store(false, Ordering::Relaxed); |
1370 | 0 | } |
1371 | 0 | } |
1372 | 0 | if !x_parent.is_null() { |
1373 | 0 | treenode!(x_parent).red.store(false, Ordering::Relaxed); |
1374 | 0 | root = Self::rotate_right(root, x_parent, guard); |
1375 | 0 | } |
1376 | | } |
1377 | 0 | x = root; |
1378 | | } |
1379 | 0 | } |
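One early exit in the fixup above is worth spelling out: if the node `x` that took the deleted node's place is red, the loop recolors it black and returns immediately. Removing a black node costs every path through it exactly one black node, and recoloring the spliced-in red node to black pays exactly that one back, so the black-count invariant is restored in O(1). A tiny self-contained illustration of the accounting (the paths and the `black_count` helper are made up for this sketch):

    // `true` = red, `false` = black; count the black nodes on one
    // root-to-leaf path.
    fn black_count(path: &[bool]) -> usize {
        path.iter().filter(|&&red| !red).count()
    }

    fn main() {
        let before = [false, false, true];  // black, black (deleted), red leaf
        let after_splice = [false, true];   // black node unlinked: one short
        let after_recolor = [false, false]; // red replacement recolored black
        assert_ne!(black_count(&before), black_count(&after_splice));
        assert_eq!(black_count(&before), black_count(&after_recolor));
    }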
1380 | |     /// Checks invariants recursively for the tree of Nodes rooted at `t`. |
1381 | 0 | fn check_invariants<'g>(t: Shared<'g, BinEntry<K, V>>, guard: &'g Guard<'_>) { |
1382 | | // safety: the containing TreeBin of all TreeNodes was read under our |
1383 | | // guard, at which point the tree structure was valid. Since our guard |
1384 | | // marks the current thread as active, the TreeNodes remain valid for |
1385 | | // at least as long as we hold onto the guard. |
1386 | | // Structurally, TreeNodes always point to TreeNodes, so this is sound. |
1387 | 0 | let t_deref = treenode!(t); |
1388 | 0 | let t_parent = t_deref.parent.load(Ordering::Relaxed, guard); |
1389 | 0 | let t_left = t_deref.left.load(Ordering::Relaxed, guard); |
1390 | 0 | let t_right = t_deref.right.load(Ordering::Relaxed, guard); |
1391 | 0 | let t_back = t_deref.prev.load(Ordering::Relaxed, guard); |
1392 | 0 | let t_next = t_deref.node.next.load(Ordering::Relaxed, guard); |
1393 | | |
1394 | 0 | if !t_back.is_null() { |
1395 | 0 | let t_back_deref = treenode!(t_back); |
1396 | 0 | assert_eq!( |
1397 | 0 | t_back_deref.node.next.load(Ordering::Relaxed, guard), |
1398 | | t, |
1399 | 0 | "A TreeNode's `prev` node did not point back to it as its `next` node" |
1400 | | ); |
1401 | 0 | } |
1402 | 0 | if !t_next.is_null() { |
1403 | 0 | let t_next_deref = treenode!(t_next); |
1404 | 0 | assert_eq!( |
1405 | 0 | t_next_deref.prev.load(Ordering::Relaxed, guard), |
1406 | | t, |
1407 | 0 | "A TreeNode's `next` node did not point back to it as its `prev` node" |
1408 | | ); |
1409 | 0 | } |
1410 | 0 | if !t_parent.is_null() { |
1411 | 0 | let t_parent_deref = treenode!(t_parent); |
1412 | 0 | assert!( |
1413 | 0 | t_parent_deref.left.load(Ordering::Relaxed, guard) == t |
1414 | 0 | || t_parent_deref.right.load(Ordering::Relaxed, guard) == t, |
1415 | 0 | "A TreeNode's `parent` node did not point back to it as either its `left` or `right` child" |
1416 | | ); |
1417 | 0 | } |
1418 | 0 | if !t_left.is_null() { |
1419 | 0 | let t_left_deref = treenode!(t_left); |
1420 | 0 | assert_eq!( |
1421 | 0 | t_left_deref.parent.load(Ordering::Relaxed, guard), |
1422 | | t, |
1423 | 0 | "A TreeNode's `left` child did not point back to it as its `parent` node" |
1424 | | ); |
1425 | 0 | assert!( |
1426 | 0 | t_left_deref.node.hash <= t_deref.node.hash, |
1427 | 0 | "A TreeNode's `left` child had a greater hash value than it" |
1428 | | ); |
1429 | 0 | } |
1430 | 0 | if !t_right.is_null() { |
1431 | 0 | let t_right_deref = treenode!(t_right); |
1432 | 0 | assert_eq!( |
1433 | 0 | t_right_deref.parent.load(Ordering::Relaxed, guard), |
1434 | | t, |
1435 | 0 | "A TreeNode's `right` child did not point back to it as its `parent` node" |
1436 | | ); |
1437 | 0 | assert!( |
1438 | 0 | t_right_deref.node.hash >= t_deref.node.hash, |
1439 | 0 | "A TreeNode's `right` child had a smaller hash value than it" |
1440 | | ); |
1441 | 0 | } |
1442 | 0 | if t_deref.red.load(Ordering::Relaxed) && !t_left.is_null() && !t_right.is_null() { |
1443 | | // if we are red, at least one of our children must be black |
1444 | 0 | let t_left_deref = treenode!(t_left); |
1445 | 0 | let t_right_deref = treenode!(t_right); |
1446 | 0 | assert!( |
1447 | 0 | !(t_left_deref.red.load(Ordering::Relaxed) |
1448 | 0 | && t_right_deref.red.load(Ordering::Relaxed)), |
1449 | 0 | "A red TreeNode's two children were both also red" |
1450 | | ); |
1451 | 0 | } |
1452 | 0 | if !t_left.is_null() { |
1453 | 0 | Self::check_invariants(t_left, guard) |
1454 | 0 | } |
1455 | 0 | if !t_right.is_null() { |
1456 | 0 | Self::check_invariants(t_right, guard) |
1457 | 0 | } |
1458 | 0 | } |
1459 | | } |
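`check_invariants` above validates the `next`/`prev` linked-list back-pointers, the `parent`/child back-pointers, BST ordering by hash, and a relaxed form of the red-black color rule (a red node must not have two red children); it does not check the classic black-height property, which requires every root-to-leaf path to pass through the same number of black nodes. For contrast, a standalone sketch of that stricter check on a plain arena-based tree (`ArenaNode` and `check` are illustrative, not flurry types):

    struct ArenaNode {
        hash: u64,
        red: bool,
        left: Option<usize>,
        right: Option<usize>,
    }

    // Returns the black height of the subtree rooted at `i`, asserting BST
    // order, the no-two-red-children rule, and equal black heights on both
    // sides (taking a missing child as black height 0).
    fn check(nodes: &[ArenaNode], i: usize) -> usize {
        let n = &nodes[i];
        if let Some(l) = n.left {
            assert!(nodes[l].hash <= n.hash, "left child hash too large");
        }
        if let Some(r) = n.right {
            assert!(nodes[r].hash >= n.hash, "right child hash too small");
        }
        let is_red = |c: Option<usize>| c.map_or(false, |c| nodes[c].red);
        if n.red {
            assert!(!(is_red(n.left) && is_red(n.right)), "two red children");
        }
        let bh = |c: Option<usize>| c.map_or(0, |c| check(nodes, c));
        let (lh, rh) = (bh(n.left), bh(n.right));
        assert_eq!(lh, rh, "black-height mismatch");
        lh + usize::from(!n.red) // black nodes add one to the path count
    }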
1460 | | |
1461 | | #[cfg(test)] |
1462 | | mod tests { |
1463 | | use super::*; |
1464 | | use std::sync::atomic::Ordering; |
1465 | | |
1466 | | fn new_node(hash: u64, key: usize, value: usize, collector: &Collector) -> Node<usize, usize> { |
1467 | | Node { |
1468 | | hash, |
1469 | | key, |
1470 | | value: Atomic::from(Shared::boxed(value, collector)), |
1471 | | next: Atomic::null(), |
1472 | | lock: Mutex::new(()), |
1473 | | } |
1474 | | } |
1475 | | |
1476 | | #[test] |
1477 | | fn find_node_no_match() { |
1478 | | let collector = seize::Collector::new(); |
1479 | | let guard = collector.enter(); |
1480 | | |
1481 | | let node2 = new_node(4, 5, 6, &collector); |
1482 | | let entry2 = BinEntry::Node(node2); |
1483 | | let node1 = new_node(1, 2, 3, &collector); |
1484 | | node1 |
1485 | | .next |
1486 | | .store(Shared::boxed(entry2, &collector), Ordering::SeqCst); |
1487 | | let entry1 = Shared::boxed(BinEntry::Node(node1), &collector); |
1488 | | let mut tab = Table::from(vec![Atomic::from(entry1)], &collector); |
1489 | | |
1490 | | // safety: we have not yet dropped entry1 |
1491 | | assert!(tab.find(unsafe { entry1.deref() }, 1, &0, &guard).is_null()); |
1492 | | tab.drop_bins(); |
1493 | | } |
1494 | | |
1495 | | #[test] |
1496 | | fn find_node_single_match() { |
1497 | | let collector = seize::Collector::new(); |
1498 | | let guard = collector.enter(); |
1499 | | |
1500 | | let entry = Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector); |
1501 | | let mut tab = Table::from(vec![Atomic::from(entry)], &collector); |
1502 | | assert_eq!( |
1503 | | // safety: we have not yet dropped entry |
1504 | | unsafe { tab.find(entry.deref(), 1, &2, &guard).deref() } |
1505 | | .as_node() |
1506 | | .unwrap() |
1507 | | .key, |
1508 | | 2 |
1509 | | ); |
1510 | | tab.drop_bins(); |
1511 | | } |
1512 | | |
1513 | | #[test] |
1514 | | fn find_node_multi_match() { |
1515 | | let collector = seize::Collector::new(); |
1516 | | let guard = collector.enter(); |
1517 | | |
1518 | | let node2 = new_node(4, 5, 6, &collector); |
1519 | | let entry2 = BinEntry::Node(node2); |
1520 | | let node1 = new_node(1, 2, 3, &collector); |
1521 | | node1 |
1522 | | .next |
1523 | | .store(Shared::boxed(entry2, &collector), Ordering::SeqCst); |
1524 | | let entry1 = Shared::boxed(BinEntry::Node(node1), &collector); |
1525 | | let mut tab = Table::from(vec![Atomic::from(entry1)], &collector); |
1526 | | assert_eq!( |
1527 | | // safety: we have not yet dropped entry1 |
1528 | | unsafe { tab.find(entry1.deref(), 4, &5, &guard).deref() } |
1529 | | .as_node() |
1530 | | .unwrap() |
1531 | | .key, |
1532 | | 5 |
1533 | | ); |
1534 | | tab.drop_bins(); |
1535 | | } |
1536 | | |
1537 | | #[test] |
1538 | | fn find_moved_empty_bins_no_match() { |
1539 | | let collector = seize::Collector::new(); |
1540 | | let guard = collector.enter(); |
1541 | | |
1542 | | let mut table = Table::<usize, usize>::new(1, &collector); |
1543 | | let table2 = Shared::boxed(Table::new(1, &collector), &collector); |
1544 | | |
1545 | | let entry = table.get_moved(table2, &guard); |
1546 | | table.store_bin(0, entry); |
1547 | | assert!(table |
1548 | | .find(&collector.link_value(BinEntry::Moved), 1, &2, &guard) |
1549 | | .is_null()); |
1550 | | table.drop_bins(); |
1551 | |         // safety: table2 is still valid and not accessed by other threads |
1552 | | unsafe { &mut *table2.as_ptr() }.drop_bins(); |
1553 | | unsafe { guard.retire_shared(table2) }; |
1554 | | } |
1555 | | |
1556 | | #[test] |
1557 | | fn find_moved_no_bins_no_match() { |
1558 | | let collector = seize::Collector::new(); |
1559 | | let guard = collector.enter(); |
1560 | | |
1561 | | let mut table = Table::<usize, usize>::new(1, &collector); |
1562 | | let table2 = Shared::boxed(Table::new(0, &collector), &collector); |
1563 | | let entry = table.get_moved(table2, &guard); |
1564 | | table.store_bin(0, entry); |
1565 | | assert!(table |
1566 | | .find(&collector.link_value(BinEntry::Moved), 1, &2, &guard) |
1567 | | .is_null()); |
1568 | | table.drop_bins(); |
1569 | |         // safety: table2 is still valid and not accessed by other threads |
1570 | | unsafe { &mut *table2.as_ptr() }.drop_bins(); |
1571 | | unsafe { guard.retire_shared(table2) }; |
1572 | | } |
1573 | | |
1574 | | #[test] |
1575 | | fn find_moved_null_bin_no_match() { |
1576 | | let collector = seize::Collector::new(); |
1577 | | let guard = collector.enter(); |
1578 | | |
1579 | | let mut table = Table::<usize, usize>::new(1, &collector); |
1580 | | let table2 = Shared::boxed(Table::new(2, &collector), &collector); |
1581 | | unsafe { table2.deref() }.store_bin( |
1582 | | 0, |
1583 | | Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector), |
1584 | | ); |
1585 | | let entry = table.get_moved(table2, &guard); |
1586 | | table.store_bin(0, entry); |
1587 | | assert!(table |
1588 | | .find(&collector.link_value(BinEntry::Moved), 0, &1, &guard) |
1589 | | .is_null()); |
1590 | | table.drop_bins(); |
1591 | |         // safety: table2 is still valid and not accessed by other threads |
1592 | | unsafe { &mut *table2.as_ptr() }.drop_bins(); |
1593 | | unsafe { guard.retire_shared(table2) }; |
1594 | | } |
1595 | | |
1596 | | #[test] |
1597 | | fn find_moved_match() { |
1598 | | let collector = seize::Collector::new(); |
1599 | | let guard = collector.enter(); |
1600 | | |
1601 | | let mut table = Table::<usize, usize>::new(1, &collector); |
1602 | | let table2 = Shared::boxed(Table::new(1, &collector), &collector); |
1603 | | // safety: table2 is still valid |
1604 | | unsafe { table2.deref() }.store_bin( |
1605 | | 0, |
1606 | | Shared::boxed(BinEntry::Node(new_node(1, 2, 3, &collector)), &collector), |
1607 | | ); |
1608 | | let entry = table.get_moved(table2, &guard); |
1609 | | table.store_bin(0, entry); |
1610 | | assert_eq!( |
1611 | | // safety: entry is still valid since the table was not dropped and the |
1612 | | // entry was not removed |
1613 | | unsafe { |
1614 | | table |
1615 | | .find(&collector.link_value(BinEntry::Moved), 1, &2, &guard) |
1616 | | .deref() |
1617 | | } |
1618 | | .as_node() |
1619 | | .unwrap() |
1620 | | .key, |
1621 | | 2 |
1622 | | ); |
1623 | | table.drop_bins(); |
1624 | |         // safety: table2 is still valid and not accessed by other threads |
1625 | | unsafe { &mut *table2.as_ptr() }.drop_bins(); |
1626 | | unsafe { guard.retire_shared(table2) }; |
1627 | | } |
1628 | | } |