/rust/registry/src/index.crates.io-1949cf8c6b5b557f/indexmap-2.13.0/src/inner.rs
Line | Count | Source |
1 | | //! This is the core implementation that doesn't depend on the hasher at all. |
2 | | //! |
3 | | //! The methods of `Core` don't use any Hash properties of K. |
4 | | //! |
5 | | //! It's cleaner to separate them out, then the compiler checks that we are not |
6 | | //! using Hash at all in these methods. |
7 | | //! |
8 | | //! However, we should probably not let this show in the public API or docs. |
9 | | |
10 | | mod entry; |
11 | | mod extract; |
12 | | |
13 | | use alloc::vec::{self, Vec}; |
14 | | use core::mem; |
15 | | use core::ops::RangeBounds; |
16 | | use hashbrown::hash_table; |
17 | | |
18 | | use crate::util::simplify_range; |
19 | | use crate::{Bucket, Equivalent, HashValue, TryReserveError}; |
20 | | |
21 | | type Indices = hash_table::HashTable<usize>; |
22 | | type Entries<K, V> = Vec<Bucket<K, V>>; |
23 | | |
24 | | pub use entry::{OccupiedEntry, VacantEntry}; |
25 | | pub(crate) use extract::ExtractCore; |
26 | | |
27 | | /// Core of the map that does not depend on S |
28 | | #[cfg_attr(feature = "test_debug", derive(Debug))] |
29 | | pub(crate) struct Core<K, V> { |
30 | | /// indices mapping from the entry hash to its index. |
31 | | indices: Indices, |
32 | | /// entries is a dense vec maintaining entry order. |
33 | | entries: Entries<K, V>, |
34 | | } |
35 | | |
36 | | #[inline(always)] |
37 | 4.79M | fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> { |
38 | 1.72M | move |&i| entries[i].hash.get() indexmap::inner::get_hash::<alloc::string::String, bson::bson::Bson>::{closure#0}Line | Count | Source | 38 | 1.72M | move |&i| entries[i].hash.get() |
Unexecuted instantiation: indexmap::inner::get_hash::<_, _>::{closure#0} |
39 | 4.79M | } indexmap::inner::get_hash::<alloc::string::String, bson::bson::Bson> Line | Count | Source | 37 | 4.79M | fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> { | 38 | | move |&i| entries[i].hash.get() | 39 | 4.79M | } |
Unexecuted instantiation: indexmap::inner::get_hash::<_, _> |
40 | | |
41 | | #[inline] |
42 | 5.06M | fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>( |
43 | 5.06M | key: &'a Q, |
44 | 5.06M | entries: &'a [Bucket<K, V>], |
45 | 5.06M | ) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> { |
46 | 3.44M | move |&i| Q::equivalent(key, &entries[i].key) indexmap::inner::equivalent::<alloc::string::String, bson::bson::Bson, alloc::string::String>::{closure#0}Line | Count | Source | 46 | 3.17M | move |&i| Q::equivalent(key, &entries[i].key) |
indexmap::inner::equivalent::<alloc::string::String, bson::bson::Bson, str>::{closure#0}Line | Count | Source | 46 | 267k | move |&i| Q::equivalent(key, &entries[i].key) |
Unexecuted instantiation: indexmap::inner::equivalent::<_, _, _>::{closure#0} |
47 | 5.06M | } indexmap::inner::equivalent::<alloc::string::String, bson::bson::Bson, alloc::string::String> Line | Count | Source | 42 | 4.79M | fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>( | 43 | 4.79M | key: &'a Q, | 44 | 4.79M | entries: &'a [Bucket<K, V>], | 45 | 4.79M | ) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> { | 46 | | move |&i| Q::equivalent(key, &entries[i].key) | 47 | 4.79M | } |
indexmap::inner::equivalent::<alloc::string::String, bson::bson::Bson, str> Line | Count | Source | 42 | 265k | fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>( | 43 | 265k | key: &'a Q, | 44 | 265k | entries: &'a [Bucket<K, V>], | 45 | 265k | ) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> { | 46 | | move |&i| Q::equivalent(key, &entries[i].key) | 47 | 265k | } |
Unexecuted instantiation: indexmap::inner::equivalent::<_, _, _> |
48 | | |
49 | | #[inline] |
50 | 0 | fn erase_index(table: &mut Indices, hash: HashValue, index: usize) { |
51 | 0 | if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) { |
52 | 0 | entry.remove(); |
53 | 0 | } else if cfg!(debug_assertions) { |
54 | 0 | panic!("index not found"); |
55 | 0 | } |
56 | 0 | } |
57 | | |
58 | | #[inline] |
59 | 0 | fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) { |
60 | 0 | let index = table |
61 | 0 | .find_mut(hash.get(), move |&i| i == old) |
62 | 0 | .expect("index not found"); |
63 | 0 | *index = new; |
64 | 0 | } |
65 | | |
66 | | /// Inserts many entries into the indices table without reallocating, |
67 | | /// and without regard for duplication. |
68 | | /// |
69 | | /// ***Panics*** if there is not sufficient capacity already. |
70 | 0 | fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) { |
71 | 0 | assert!(indices.capacity() - indices.len() >= entries.len()); |
72 | 0 | for entry in entries { |
73 | 0 | indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!()); |
74 | | } |
75 | 0 | } |
76 | | |
77 | | impl<K, V> Clone for Core<K, V> |
78 | | where |
79 | | K: Clone, |
80 | | V: Clone, |
81 | | { |
82 | 0 | fn clone(&self) -> Self { |
83 | 0 | let mut new = Self::new(); |
84 | 0 | new.clone_from(self); |
85 | 0 | new |
86 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson> as core::clone::Clone>::clone Unexecuted instantiation: <indexmap::inner::Core<_, _> as core::clone::Clone>::clone |
87 | | |
88 | 0 | fn clone_from(&mut self, other: &Self) { |
89 | 0 | self.indices.clone_from(&other.indices); |
90 | 0 | if self.entries.capacity() < other.entries.len() { |
91 | 0 | // If we must resize, match the indices capacity. |
92 | 0 | let additional = other.entries.len() - self.entries.len(); |
93 | 0 | self.reserve_entries(additional); |
94 | 0 | } |
95 | 0 | self.entries.clone_from(&other.entries); |
96 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson> as core::clone::Clone>::clone_from Unexecuted instantiation: <indexmap::inner::Core<_, _> as core::clone::Clone>::clone_from |
97 | | } |
98 | | |
99 | | impl<K, V> Core<K, V> { |
100 | | /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`. |
101 | | const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::<Bucket<K, V>>(); |
102 | | |
103 | | #[inline] |
104 | 1.85M | pub(crate) const fn new() -> Self { |
105 | 1.85M | Core { |
106 | 1.85M | indices: Indices::new(), |
107 | 1.85M | entries: Vec::new(), |
108 | 1.85M | } |
109 | 1.85M | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::new Line | Count | Source | 104 | 1.85M | pub(crate) const fn new() -> Self { | 105 | 1.85M | Core { | 106 | 1.85M | indices: Indices::new(), | 107 | 1.85M | entries: Vec::new(), | 108 | 1.85M | } | 109 | 1.85M | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::new |
110 | | |
111 | | #[inline] |
112 | 0 | pub(crate) fn with_capacity(n: usize) -> Self { |
113 | 0 | Core { |
114 | 0 | indices: Indices::with_capacity(n), |
115 | 0 | entries: Vec::with_capacity(n), |
116 | 0 | } |
117 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::with_capacity Unexecuted instantiation: <indexmap::inner::Core<_, _>>::with_capacity |
118 | | |
119 | | #[inline] |
120 | 41.6k | pub(crate) fn into_entries(self) -> Entries<K, V> { |
121 | 41.6k | self.entries |
122 | 41.6k | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::into_entries Line | Count | Source | 120 | 41.6k | pub(crate) fn into_entries(self) -> Entries<K, V> { | 121 | 41.6k | self.entries | 122 | 41.6k | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::into_entries |
123 | | |
124 | | #[inline] |
125 | 613k | pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] { |
126 | 613k | &self.entries |
127 | 613k | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::as_entries Line | Count | Source | 125 | 613k | pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] { | 126 | 613k | &self.entries | 127 | 613k | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::as_entries |
128 | | |
129 | | #[inline] |
130 | 0 | pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] { |
131 | 0 | &mut self.entries |
132 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::as_entries_mut Unexecuted instantiation: <indexmap::inner::Core<_, _>>::as_entries_mut |
133 | | |
134 | 0 | pub(crate) fn with_entries<F>(&mut self, f: F) |
135 | 0 | where |
136 | 0 | F: FnOnce(&mut [Bucket<K, V>]), |
137 | | { |
138 | 0 | f(&mut self.entries); |
139 | 0 | self.rebuild_hash_table(); |
140 | 0 | } |
141 | | |
142 | | #[inline] |
143 | 106k | pub(crate) fn len(&self) -> usize { |
144 | 106k | debug_assert_eq!(self.entries.len(), self.indices.len()); |
145 | 106k | self.indices.len() |
146 | 106k | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::len Line | Count | Source | 143 | 106k | pub(crate) fn len(&self) -> usize { | 144 | 106k | debug_assert_eq!(self.entries.len(), self.indices.len()); | 145 | 106k | self.indices.len() | 146 | 106k | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::len |
147 | | |
148 | | #[inline] |
149 | 0 | pub(crate) fn capacity(&self) -> usize { |
150 | 0 | Ord::min(self.indices.capacity(), self.entries.capacity()) |
151 | 0 | } |
152 | | |
153 | 0 | pub(crate) fn clear(&mut self) { |
154 | 0 | self.indices.clear(); |
155 | 0 | self.entries.clear(); |
156 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::clear Unexecuted instantiation: <indexmap::inner::Core<_, _>>::clear |
157 | | |
158 | 0 | pub(crate) fn truncate(&mut self, len: usize) { |
159 | 0 | if len < self.len() { |
160 | 0 | self.erase_indices(len, self.entries.len()); |
161 | 0 | self.entries.truncate(len); |
162 | 0 | } |
163 | 0 | } |
164 | | |
165 | | #[track_caller] |
166 | 0 | pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>> |
167 | 0 | where |
168 | 0 | R: RangeBounds<usize>, |
169 | | { |
170 | 0 | let range = simplify_range(range, self.entries.len()); |
171 | 0 | self.erase_indices(range.start, range.end); |
172 | 0 | self.entries.drain(range) |
173 | 0 | } |
174 | | |
175 | | #[cfg(feature = "rayon")] |
176 | | pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>> |
177 | | where |
178 | | K: Send, |
179 | | V: Send, |
180 | | R: RangeBounds<usize>, |
181 | | { |
182 | | use rayon::iter::ParallelDrainRange; |
183 | | let range = simplify_range(range, self.entries.len()); |
184 | | self.erase_indices(range.start, range.end); |
185 | | self.entries.par_drain(range) |
186 | | } |
187 | | |
188 | | #[track_caller] |
189 | 0 | pub(crate) fn split_off(&mut self, at: usize) -> Self { |
190 | 0 | let len = self.entries.len(); |
191 | 0 | assert!( |
192 | 0 | at <= len, |
193 | 0 | "index out of bounds: the len is {len} but the index is {at}. Expected index <= len" |
194 | | ); |
195 | | |
196 | 0 | self.erase_indices(at, self.entries.len()); |
197 | 0 | let entries = self.entries.split_off(at); |
198 | | |
199 | 0 | let mut indices = Indices::with_capacity(entries.len()); |
200 | 0 | insert_bulk_no_grow(&mut indices, &entries); |
201 | 0 | Self { indices, entries } |
202 | 0 | } |
203 | | |
204 | | #[track_caller] |
205 | 0 | pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>) |
206 | 0 | where |
207 | 0 | R: RangeBounds<usize>, |
208 | | { |
209 | 0 | let range = simplify_range(range, self.len()); |
210 | 0 | self.erase_indices(range.start, self.entries.len()); |
211 | 0 | let entries = self.entries.split_off(range.end); |
212 | 0 | let drained = self.entries.split_off(range.start); |
213 | | |
214 | 0 | let mut indices = Indices::with_capacity(entries.len()); |
215 | 0 | insert_bulk_no_grow(&mut indices, &entries); |
216 | 0 | (Self { indices, entries }, drained.into_iter()) |
217 | 0 | } |
218 | | |
219 | | /// Append from another map without checking whether items already exist. |
220 | 0 | pub(crate) fn append_unchecked(&mut self, other: &mut Self) { |
221 | 0 | self.reserve(other.len()); |
222 | 0 | insert_bulk_no_grow(&mut self.indices, &other.entries); |
223 | 0 | self.entries.append(&mut other.entries); |
224 | 0 | other.indices.clear(); |
225 | 0 | } |
226 | | |
227 | | /// Reserve capacity for `additional` more key-value pairs. |
228 | 0 | pub(crate) fn reserve(&mut self, additional: usize) { |
229 | 0 | self.indices.reserve(additional, get_hash(&self.entries)); |
230 | | // Only grow entries if necessary, since we also round up capacity. |
231 | 0 | if additional > self.entries.capacity() - self.entries.len() { |
232 | 0 | self.reserve_entries(additional); |
233 | 0 | } |
234 | 0 | } |
235 | | |
236 | | /// Reserve capacity for `additional` more key-value pairs, without over-allocating. |
237 | 0 | pub(crate) fn reserve_exact(&mut self, additional: usize) { |
238 | 0 | self.indices.reserve(additional, get_hash(&self.entries)); |
239 | 0 | self.entries.reserve_exact(additional); |
240 | 0 | } |
241 | | |
242 | | /// Try to reserve capacity for `additional` more key-value pairs. |
243 | 0 | pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { |
244 | 0 | self.indices |
245 | 0 | .try_reserve(additional, get_hash(&self.entries)) |
246 | 0 | .map_err(TryReserveError::from_hashbrown)?; |
247 | | // Only grow entries if necessary, since we also round up capacity. |
248 | 0 | if additional > self.entries.capacity() - self.entries.len() { |
249 | 0 | self.try_reserve_entries(additional) |
250 | | } else { |
251 | 0 | Ok(()) |
252 | | } |
253 | 0 | } |
254 | | |
255 | | /// Try to reserve entries capacity, rounded up to match the indices |
256 | 0 | fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> { |
257 | | // Use a soft-limit on the maximum capacity, but if the caller explicitly |
258 | | // requested more, do it and let them have the resulting error. |
259 | 0 | let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); |
260 | 0 | let try_add = new_capacity - self.entries.len(); |
261 | 0 | if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { |
262 | 0 | return Ok(()); |
263 | 0 | } |
264 | 0 | self.entries |
265 | 0 | .try_reserve_exact(additional) |
266 | 0 | .map_err(TryReserveError::from_alloc) |
267 | 0 | } |
268 | | |
269 | | /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. |
270 | 0 | pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { |
271 | 0 | self.indices |
272 | 0 | .try_reserve(additional, get_hash(&self.entries)) |
273 | 0 | .map_err(TryReserveError::from_hashbrown)?; |
274 | 0 | self.entries |
275 | 0 | .try_reserve_exact(additional) |
276 | 0 | .map_err(TryReserveError::from_alloc) |
277 | 0 | } |
278 | | |
279 | | /// Shrink the capacity of the map with a lower bound |
280 | 0 | pub(crate) fn shrink_to(&mut self, min_capacity: usize) { |
281 | 0 | self.indices |
282 | 0 | .shrink_to(min_capacity, get_hash(&self.entries)); |
283 | 0 | self.entries.shrink_to(min_capacity); |
284 | 0 | } |
285 | | |
286 | | /// Remove the last key-value pair |
287 | 0 | pub(crate) fn pop(&mut self) -> Option<(K, V)> { |
288 | 0 | if let Some(entry) = self.entries.pop() { |
289 | 0 | let last = self.entries.len(); |
290 | 0 | erase_index(&mut self.indices, entry.hash, last); |
291 | 0 | Some((entry.key, entry.value)) |
292 | | } else { |
293 | 0 | None |
294 | | } |
295 | 0 | } |
296 | | |
297 | | /// Return the index in `entries` where an equivalent key can be found |
298 | 265k | pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize> |
299 | 265k | where |
300 | 265k | Q: ?Sized + Equivalent<K>, |
301 | | { |
302 | 265k | let eq = equivalent(key, &self.entries); |
303 | 265k | self.indices.find(hash.get(), eq).copied() |
304 | 265k | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::get_index_of::<alloc::string::String> <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::get_index_of::<str> Line | Count | Source | 298 | 265k | pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize> | 299 | 265k | where | 300 | 265k | Q: ?Sized + Equivalent<K>, | 301 | | { | 302 | 265k | let eq = equivalent(key, &self.entries); | 303 | 265k | self.indices.find(hash.get(), eq).copied() | 304 | 265k | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::get_index_of::<_> |
305 | | |
306 | | /// Return the index in `entries` where an equivalent key can be found |
307 | 0 | pub(crate) fn get_index_of_raw<F>(&self, hash: HashValue, mut is_match: F) -> Option<usize> |
308 | 0 | where |
309 | 0 | F: FnMut(&K) -> bool, |
310 | | { |
311 | 0 | let eq = move |&i: &usize| is_match(&self.entries[i].key); |
312 | 0 | self.indices.find(hash.get(), eq).copied() |
313 | 0 | } |
314 | | |
315 | | /// Append a key-value pair to `entries`, |
316 | | /// *without* checking whether it already exists. |
317 | 1.71M | fn push_entry(&mut self, hash: HashValue, key: K, value: V) { |
318 | 1.71M | if self.entries.len() == self.entries.capacity() { |
319 | 465k | // Reserve our own capacity synced to the indices, |
320 | 465k | // rather than letting `Vec::push` just double it. |
321 | 465k | self.reserve_entries(1); |
322 | 1.24M | } |
323 | 1.71M | self.entries.push(Bucket { hash, key, value }); |
324 | 1.71M | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::push_entry Line | Count | Source | 317 | 1.71M | fn push_entry(&mut self, hash: HashValue, key: K, value: V) { | 318 | 1.71M | if self.entries.len() == self.entries.capacity() { | 319 | 465k | // Reserve our own capacity synced to the indices, | 320 | 465k | // rather than letting `Vec::push` just double it. | 321 | 465k | self.reserve_entries(1); | 322 | 1.24M | } | 323 | 1.71M | self.entries.push(Bucket { hash, key, value }); | 324 | 1.71M | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::push_entry |
325 | | |
326 | 4.79M | pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>) |
327 | 4.79M | where |
328 | 4.79M | K: Eq, |
329 | | { |
330 | 4.79M | let eq = equivalent(&key, &self.entries); |
331 | 4.79M | let hasher = get_hash(&self.entries); |
332 | 4.79M | match self.indices.entry(hash.get(), eq, hasher) { |
333 | 3.08M | hash_table::Entry::Occupied(entry) => { |
334 | 3.08M | let i = *entry.get(); |
335 | 3.08M | (i, Some(mem::replace(&mut self.entries[i].value, value))) |
336 | | } |
337 | 1.71M | hash_table::Entry::Vacant(entry) => { |
338 | 1.71M | let i = self.entries.len(); |
339 | 1.71M | entry.insert(i); |
340 | 1.71M | self.push_entry(hash, key, value); |
341 | 1.71M | debug_assert_eq!(self.indices.len(), self.entries.len()); |
342 | 1.71M | (i, None) |
343 | | } |
344 | | } |
345 | 4.79M | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::insert_full Line | Count | Source | 326 | 4.79M | pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>) | 327 | 4.79M | where | 328 | 4.79M | K: Eq, | 329 | | { | 330 | 4.79M | let eq = equivalent(&key, &self.entries); | 331 | 4.79M | let hasher = get_hash(&self.entries); | 332 | 4.79M | match self.indices.entry(hash.get(), eq, hasher) { | 333 | 3.08M | hash_table::Entry::Occupied(entry) => { | 334 | 3.08M | let i = *entry.get(); | 335 | 3.08M | (i, Some(mem::replace(&mut self.entries[i].value, value))) | 336 | | } | 337 | 1.71M | hash_table::Entry::Vacant(entry) => { | 338 | 1.71M | let i = self.entries.len(); | 339 | 1.71M | entry.insert(i); | 340 | 1.71M | self.push_entry(hash, key, value); | 341 | 1.71M | debug_assert_eq!(self.indices.len(), self.entries.len()); | 342 | 1.71M | (i, None) | 343 | | } | 344 | | } | 345 | 4.79M | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::insert_full |
346 | | |
347 | | /// Same as `insert_full`, except it also replaces the key |
348 | 0 | pub(crate) fn replace_full( |
349 | 0 | &mut self, |
350 | 0 | hash: HashValue, |
351 | 0 | key: K, |
352 | 0 | value: V, |
353 | 0 | ) -> (usize, Option<(K, V)>) |
354 | 0 | where |
355 | 0 | K: Eq, |
356 | | { |
357 | 0 | let eq = equivalent(&key, &self.entries); |
358 | 0 | let hasher = get_hash(&self.entries); |
359 | 0 | match self.indices.entry(hash.get(), eq, hasher) { |
360 | 0 | hash_table::Entry::Occupied(entry) => { |
361 | 0 | let i = *entry.get(); |
362 | 0 | let entry = &mut self.entries[i]; |
363 | 0 | let kv = ( |
364 | 0 | mem::replace(&mut entry.key, key), |
365 | 0 | mem::replace(&mut entry.value, value), |
366 | 0 | ); |
367 | 0 | (i, Some(kv)) |
368 | | } |
369 | 0 | hash_table::Entry::Vacant(entry) => { |
370 | 0 | let i = self.entries.len(); |
371 | 0 | entry.insert(i); |
372 | 0 | self.push_entry(hash, key, value); |
373 | 0 | debug_assert_eq!(self.indices.len(), self.entries.len()); |
374 | 0 | (i, None) |
375 | | } |
376 | | } |
377 | 0 | } |
378 | | |
379 | | /// Remove an entry by shifting all entries that follow it |
380 | 0 | pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> |
381 | 0 | where |
382 | 0 | Q: ?Sized + Equivalent<K>, |
383 | | { |
384 | 0 | let eq = equivalent(key, &self.entries); |
385 | 0 | let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove(); |
386 | 0 | let (key, value) = self.shift_remove_finish(index); |
387 | 0 | Some((index, key, value)) |
388 | 0 | } |
389 | | |
390 | | /// Remove an entry by swapping it with the last |
391 | 0 | pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> |
392 | 0 | where |
393 | 0 | Q: ?Sized + Equivalent<K>, |
394 | | { |
395 | 0 | let eq = equivalent(key, &self.entries); |
396 | 0 | let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove(); |
397 | 0 | let (key, value) = self.swap_remove_finish(index); |
398 | 0 | Some((index, key, value)) |
399 | 0 | } |
400 | | |
401 | | /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..` |
402 | | /// |
403 | | /// All of these items should still be at their original location in `entries`. |
404 | | /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. |
405 | 0 | fn erase_indices(&mut self, start: usize, end: usize) { |
406 | 0 | let (init, shifted_entries) = self.entries.split_at(end); |
407 | 0 | let (start_entries, erased_entries) = init.split_at(start); |
408 | | |
409 | 0 | let erased = erased_entries.len(); |
410 | 0 | let shifted = shifted_entries.len(); |
411 | 0 | let half_capacity = self.indices.capacity() / 2; |
412 | | |
413 | | // Use a heuristic between different strategies |
414 | 0 | if erased == 0 { |
415 | 0 | // Degenerate case, nothing to do |
416 | 0 | } else if start + shifted < half_capacity && start < erased { |
417 | 0 | // Reinsert everything, as there are few kept indices |
418 | 0 | self.indices.clear(); |
419 | 0 |
|
420 | 0 | // Reinsert stable indices, then shifted indices |
421 | 0 | insert_bulk_no_grow(&mut self.indices, start_entries); |
422 | 0 | insert_bulk_no_grow(&mut self.indices, shifted_entries); |
423 | 0 | } else if erased + shifted < half_capacity { |
424 | | // Find each affected index, as there are few to adjust |
425 | | |
426 | | // Find erased indices |
427 | 0 | for (i, entry) in (start..).zip(erased_entries) { |
428 | 0 | erase_index(&mut self.indices, entry.hash, i); |
429 | 0 | } |
430 | | |
431 | | // Find shifted indices |
432 | 0 | for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { |
433 | 0 | update_index(&mut self.indices, entry.hash, old, new); |
434 | 0 | } |
435 | | } else { |
436 | | // Sweep the whole table for adjustments |
437 | 0 | let offset = end - start; |
438 | 0 | self.indices.retain(move |i| { |
439 | 0 | if *i >= end { |
440 | 0 | *i -= offset; |
441 | 0 | true |
442 | | } else { |
443 | 0 | *i < start |
444 | | } |
445 | 0 | }); |
446 | | } |
447 | | |
448 | 0 | debug_assert_eq!(self.indices.len(), start + shifted); |
449 | 0 | } |
450 | | |
451 | 0 | pub(crate) fn retain_in_order<F>(&mut self, mut keep: F) |
452 | 0 | where |
453 | 0 | F: FnMut(&mut K, &mut V) -> bool, |
454 | | { |
455 | 0 | self.entries |
456 | 0 | .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); |
457 | 0 | if self.entries.len() < self.indices.len() { |
458 | 0 | self.rebuild_hash_table(); |
459 | 0 | } |
460 | 0 | } |
461 | | |
462 | 0 | fn rebuild_hash_table(&mut self) { |
463 | 0 | self.indices.clear(); |
464 | 0 | insert_bulk_no_grow(&mut self.indices, &self.entries); |
465 | 0 | } |
466 | | |
467 | 0 | pub(crate) fn reverse(&mut self) { |
468 | 0 | self.entries.reverse(); |
469 | | |
470 | | // No need to save hash indices, can easily calculate what they should |
471 | | // be, given that this is an in-place reversal. |
472 | 0 | let len = self.entries.len(); |
473 | 0 | for i in &mut self.indices { |
474 | 0 | *i = len - *i - 1; |
475 | 0 | } |
476 | 0 | } |
477 | | |
478 | | /// Reserve entries capacity, rounded up to match the indices |
479 | | #[inline] |
480 | 465k | fn reserve_entries(&mut self, additional: usize) { |
481 | | // Use a soft-limit on the maximum capacity, but if the caller explicitly |
482 | | // requested more, do it and let them have the resulting panic. |
483 | 465k | let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); |
484 | 465k | let try_add = try_capacity - self.entries.len(); |
485 | 465k | if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { |
486 | 465k | return; |
487 | 0 | } |
488 | 0 | self.entries.reserve_exact(additional); |
489 | 465k | } <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::reserve_entries Line | Count | Source | 480 | 465k | fn reserve_entries(&mut self, additional: usize) { | 481 | | // Use a soft-limit on the maximum capacity, but if the caller explicitly | 482 | | // requested more, do it and let them have the resulting panic. | 483 | 465k | let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); | 484 | 465k | let try_add = try_capacity - self.entries.len(); | 485 | 465k | if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { | 486 | 465k | return; | 487 | 0 | } | 488 | 0 | self.entries.reserve_exact(additional); | 489 | 465k | } |
Unexecuted instantiation: <indexmap::inner::Core<_, _>>::reserve_entries |
490 | | |
491 | | /// Insert a key-value pair in `entries`, |
492 | | /// *without* checking whether it already exists. |
493 | 0 | pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> { |
494 | 0 | let i = self.indices.len(); |
495 | 0 | debug_assert_eq!(i, self.entries.len()); |
496 | 0 | self.indices |
497 | 0 | .insert_unique(hash.get(), i, get_hash(&self.entries)); |
498 | 0 | self.push_entry(hash, key, value); |
499 | 0 | &mut self.entries[i] |
500 | 0 | } Unexecuted instantiation: <indexmap::inner::Core<alloc::string::String, bson::bson::Bson>>::insert_unique Unexecuted instantiation: <indexmap::inner::Core<_, _>>::insert_unique |
501 | | |
502 | | /// Replaces the key at the given index, |
503 | | /// *without* checking whether it already exists. |
504 | | #[track_caller] |
505 | 0 | pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K { |
506 | | // NB: This removal and insertion isn't "no grow" (with unreachable hasher) |
507 | | // because hashbrown's tombstones might force a resize anyway. |
508 | 0 | erase_index(&mut self.indices, self.entries[index].hash, index); |
509 | 0 | self.indices |
510 | 0 | .insert_unique(hash.get(), index, get_hash(&self.entries)); |
511 | | |
512 | 0 | let entry = &mut self.entries[index]; |
513 | 0 | entry.hash = hash; |
514 | 0 | mem::replace(&mut entry.key, key) |
515 | 0 | } |
516 | | |
517 | | /// Insert a key-value pair in `entries` at a particular index, |
518 | | /// *without* checking whether it already exists. |
519 | 0 | pub(crate) fn shift_insert_unique( |
520 | 0 | &mut self, |
521 | 0 | index: usize, |
522 | 0 | hash: HashValue, |
523 | 0 | key: K, |
524 | 0 | value: V, |
525 | 0 | ) -> &mut Bucket<K, V> { |
526 | 0 | let end = self.indices.len(); |
527 | 0 | assert!(index <= end); |
528 | | // Increment others first so we don't have duplicate indices. |
529 | 0 | self.increment_indices(index, end); |
530 | 0 | let entries = &*self.entries; |
531 | 0 | self.indices.insert_unique(hash.get(), index, move |&i| { |
532 | | // Adjust for the incremented indices to find hashes. |
533 | 0 | debug_assert_ne!(i, index); |
534 | 0 | let i = if i < index { i } else { i - 1 }; |
535 | 0 | entries[i].hash.get() |
536 | 0 | }); |
537 | 0 | if self.entries.len() == self.entries.capacity() { |
538 | 0 | // Reserve our own capacity synced to the indices, |
539 | 0 | // rather than letting `Vec::insert` just double it. |
540 | 0 | self.reserve_entries(1); |
541 | 0 | } |
542 | 0 | self.entries.insert(index, Bucket { hash, key, value }); |
543 | 0 | &mut self.entries[index] |
544 | 0 | } |
545 | | |
546 | | /// Remove an entry by shifting all entries that follow it |
547 | 0 | pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { |
548 | 0 | match self.entries.get(index) { |
549 | 0 | Some(entry) => { |
550 | 0 | erase_index(&mut self.indices, entry.hash, index); |
551 | 0 | Some(self.shift_remove_finish(index)) |
552 | | } |
553 | 0 | None => None, |
554 | | } |
555 | 0 | } |
556 | | |
557 | | /// Remove an entry by shifting all entries that follow it |
558 | | /// |
559 | | /// The index should already be removed from `self.indices`. |
560 | 0 | fn shift_remove_finish(&mut self, index: usize) -> (K, V) { |
561 | | // Correct indices that point to the entries that followed the removed entry. |
562 | 0 | self.decrement_indices(index + 1, self.entries.len()); |
563 | | |
564 | | // Use Vec::remove to actually remove the entry. |
565 | 0 | let entry = self.entries.remove(index); |
566 | 0 | (entry.key, entry.value) |
567 | 0 | } |
568 | | |
569 | | /// Remove an entry by swapping it with the last |
570 | 0 | pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { |
571 | 0 | match self.entries.get(index) { |
572 | 0 | Some(entry) => { |
573 | 0 | erase_index(&mut self.indices, entry.hash, index); |
574 | 0 | Some(self.swap_remove_finish(index)) |
575 | | } |
576 | 0 | None => None, |
577 | | } |
578 | 0 | } |
579 | | |
580 | | /// Finish removing an entry by swapping it with the last |
581 | | /// |
582 | | /// The index should already be removed from `self.indices`. |
583 | 0 | fn swap_remove_finish(&mut self, index: usize) -> (K, V) { |
584 | | // use swap_remove, but then we need to update the index that points |
585 | | // to the other entry that has to move |
586 | 0 | let entry = self.entries.swap_remove(index); |
587 | | |
588 | | // correct index that points to the entry that had to swap places |
589 | 0 | if let Some(entry) = self.entries.get(index) { |
590 | 0 | // was not last element |
591 | 0 | // examine new element in `index` and find it in indices |
592 | 0 | let last = self.entries.len(); |
593 | 0 | update_index(&mut self.indices, entry.hash, last, index); |
594 | 0 | } |
595 | | |
596 | 0 | (entry.key, entry.value) |
597 | 0 | } |
598 | | |
599 | | /// Decrement all indices in the range `start..end`. |
600 | | /// |
601 | | /// The index `start - 1` should not exist in `self.indices`. |
602 | | /// All entries should still be in their original positions. |
603 | 0 | fn decrement_indices(&mut self, start: usize, end: usize) { |
604 | | // Use a heuristic between a full sweep vs. a `find()` for every shifted item. |
605 | 0 | let shifted_entries = &self.entries[start..end]; |
606 | 0 | if shifted_entries.len() > self.indices.capacity() / 2 { |
607 | | // Shift all indices in range. |
608 | 0 | for i in &mut self.indices { |
609 | 0 | if start <= *i && *i < end { |
610 | 0 | *i -= 1; |
611 | 0 | } |
612 | | } |
613 | | } else { |
614 | | // Find each entry in range to shift its index. |
615 | 0 | for (i, entry) in (start..end).zip(shifted_entries) { |
616 | 0 | update_index(&mut self.indices, entry.hash, i, i - 1); |
617 | 0 | } |
618 | | } |
619 | 0 | } |
620 | | |
621 | | /// Increment all indices in the range `start..end`. |
622 | | /// |
623 | | /// The index `end` should not exist in `self.indices`. |
624 | | /// All entries should still be in their original positions. |
625 | 0 | fn increment_indices(&mut self, start: usize, end: usize) { |
626 | | // Use a heuristic between a full sweep vs. a `find()` for every shifted item. |
627 | 0 | let shifted_entries = &self.entries[start..end]; |
628 | 0 | if shifted_entries.len() > self.indices.capacity() / 2 { |
629 | | // Shift all indices in range. |
630 | 0 | for i in &mut self.indices { |
631 | 0 | if start <= *i && *i < end { |
632 | 0 | *i += 1; |
633 | 0 | } |
634 | | } |
635 | | } else { |
636 | | // Find each entry in range to shift its index, updated in reverse so |
637 | | // we never have duplicated indices that might have a hash collision. |
638 | 0 | for (i, entry) in (start..end).zip(shifted_entries).rev() { |
639 | 0 | update_index(&mut self.indices, entry.hash, i, i + 1); |
640 | 0 | } |
641 | | } |
642 | 0 | } |
643 | | |
644 | | #[track_caller] |
645 | 0 | pub(super) fn move_index(&mut self, from: usize, to: usize) { |
646 | 0 | let from_hash = self.entries[from].hash; |
647 | 0 | if from != to { |
648 | 0 | let _ = self.entries[to]; // explicit bounds check |
649 | | |
650 | | // Find the bucket index first so we won't lose it among other updated indices. |
651 | 0 | let bucket = self |
652 | 0 | .indices |
653 | 0 | .find_bucket_index(from_hash.get(), move |&i| i == from) |
654 | 0 | .expect("index not found"); |
655 | | |
656 | 0 | self.move_index_inner(from, to); |
657 | 0 | *self.indices.get_bucket_mut(bucket).unwrap() = to; |
658 | 0 | } |
659 | 0 | } |
660 | | |
661 | 0 | fn move_index_inner(&mut self, from: usize, to: usize) { |
662 | | // Update all other indices and rotate the entry positions. |
663 | 0 | if from < to { |
664 | 0 | self.decrement_indices(from + 1, to + 1); |
665 | 0 | self.entries[from..=to].rotate_left(1); |
666 | 0 | } else if to < from { |
667 | 0 | self.increment_indices(to, from); |
668 | 0 | self.entries[to..=from].rotate_right(1); |
669 | 0 | } |
670 | 0 | } |
671 | | |
672 | | #[track_caller] |
673 | 0 | pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { |
674 | | // If they're equal and in-bounds, there's nothing to do. |
675 | 0 | if a == b && a < self.entries.len() { |
676 | 0 | return; |
677 | 0 | } |
678 | | |
679 | | // We'll get a "nice" bounds-check from indexing `entries`, |
680 | | // and then we expect to find it in the table as well. |
681 | 0 | match self.indices.get_disjoint_mut( |
682 | 0 | [self.entries[a].hash.get(), self.entries[b].hash.get()], |
683 | 0 | move |i, &x| if i == 0 { x == a } else { x == b }, |
684 | | ) { |
685 | 0 | [Some(ref_a), Some(ref_b)] => { |
686 | 0 | mem::swap(ref_a, ref_b); |
687 | 0 | self.entries.swap(a, b); |
688 | 0 | } |
689 | 0 | _ => panic!("indices not found"), |
690 | | } |
691 | 0 | } |
692 | | } |
693 | | |
694 | | #[test] |
695 | | fn assert_send_sync() { |
696 | | fn assert_send_sync<T: Send + Sync>() {} |
697 | | assert_send_sync::<Core<i32, i32>>(); |
698 | | } |