/rust/registry/src/index.crates.io-1949cf8c6b5b557f/indexmap-2.13.0/src/inner/extract.rs
#![allow(unsafe_code)]

use super::{Bucket, Core};
use crate::util::simplify_range;

use core::ops::RangeBounds;

impl<K, V> Core<K, V> {
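    /// Begins extraction over `range`, returning a guard that walks the
    /// entries and moves selected items out.
    ///
    /// `entries` is temporarily truncated to `range.start` so that a panic in
    /// the caller's predicate can only leak items, never expose moved-out
    /// ones; the guard's `Drop` restores the map afterwards.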
    #[track_caller]
    pub(crate) fn extract<R>(&mut self, range: R) -> ExtractCore<'_, K, V>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());

        // SAFETY: We must have consistent lengths to start, so that's a hard assertion.
        // Then the worst `set_len` can do is leak items if `ExtractCore` doesn't drop.
        assert_eq!(self.entries.len(), self.indices.len());
        unsafe {
            self.entries.set_len(range.start);
        }
        ExtractCore {
            map: self,
            new_len: range.start,
            current: range.start,
            end: range.end,
        }
    }
}

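/// A draining guard over `Core<K, V>`.
///
/// Invariants maintained while iterating: `entries[..new_len]` holds the
/// items kept so far, `entries[new_len..current]` is a gap of moved-out
/// items, and `entries[current..]` (up to the original length) has not been
/// touched yet.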
pub(crate) struct ExtractCore<'a, K, V> {
    map: &'a mut Core<K, V>,
    new_len: usize,
    current: usize,
    end: usize,
}

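// Dropping the guard closes the gap left by extracted items, restores the
// true `entries` length, and reindexes if anything was actually removed.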
impl<K, V> Drop for ExtractCore<'_, K, V> {
    fn drop(&mut self) {
        let old_len = self.map.indices.len();
        let mut new_len = self.new_len;

        debug_assert!(new_len <= self.current);
        debug_assert!(self.current <= self.end);
        debug_assert!(self.current <= old_len);
        debug_assert!(old_len <= self.map.entries.capacity());

        // SAFETY: We assume `new_len` and `current` were correctly maintained by the iterator.
        // So `entries[new_len..current]` were extracted, but the rest before and after are valid.
        unsafe {
            if new_len == self.current {
                // Nothing was extracted, so any remaining items can be left in place.
                new_len = old_len;
            } else if self.current < old_len {
                // Need to shift the remaining items down.
                let tail_len = old_len - self.current;
                let base = self.map.entries.as_mut_ptr();
                let src = base.add(self.current);
                let dest = base.add(new_len);
                src.copy_to(dest, tail_len);
                new_len += tail_len;
            }
            self.map.entries.set_len(new_len);
        }

        if new_len != old_len {
            // We don't keep track of *which* items were extracted, so reindex everything.
            self.map.rebuild_hash_table();
        }
    }
}

impl<K, V> ExtractCore<'_, K, V> {
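    /// Runs `pred` against the remaining entries in order; the first one that
    /// matches is moved out and returned. Non-matching entries are shifted
    /// down to close the gap, and `None` means the range is exhausted.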
    pub(crate) fn extract_if<F>(&mut self, mut pred: F) -> Option<Bucket<K, V>>
    where
        F: FnMut(&mut Bucket<K, V>) -> bool,
    {
        debug_assert!(self.end <= self.map.entries.capacity());

        let base = self.map.entries.as_mut_ptr();
        while self.current < self.end {
            // SAFETY: We're maintaining both indices within bounds of the original entries, so
            // 0..new_len and current..indices.len() are always valid items for our Drop to keep.
            unsafe {
                let item = base.add(self.current);
                if pred(&mut *item) {
                    // Extract it!
                    self.current += 1;
                    return Some(item.read());
                } else {
                    // Keep it, shifting it down if needed.
                    if self.new_len != self.current {
                        debug_assert!(self.new_len < self.current);
                        let dest = base.add(self.new_len);
                        item.copy_to_nonoverlapping(dest, 1);
                    }
                    self.current += 1;
                    self.new_len += 1;
                }
            }
        }
        None
    }

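    /// Returns the number of entries not yet visited by `extract_if`.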
    pub(crate) fn remaining(&self) -> usize {
        self.end - self.current
    }
}
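
// A minimal usage sketch, not part of the original file: it exercises this
// machinery through the public `IndexMap::extract_if(range, pred)` API that
// drives `Core::extract` and `ExtractCore::extract_if` under the hood. The
// module name and the exact public signature are assumptions about the
// surrounding crate, not something this file defines.
#[cfg(test)]
mod extract_sketch {
    use crate::IndexMap;

    #[test]
    fn extracts_matches_and_keeps_order() {
        let mut map = IndexMap::new();
        map.insert("a", 1);
        map.insert("b", 2);
        map.insert("c", 3);
        map.insert("d", 4);

        // Drain the even values from the full range. Extracted pairs come out
        // in insertion order, and the kept entries keep their relative order.
        let evens: Vec<_> = map.extract_if(.., |_key, value| *value % 2 == 0).collect();
        assert_eq!(evens, [("b", 2), ("d", 4)]);
        assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "c"]);
    }
}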