Coverage Report

Created: 2025-12-28 06:31

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/moka-0.12.10/src/cht/segment.rs
Line
Count
Source
1
//! Segmented lock-free hash tables.
2
//!
3
//! Segmented hash tables divide their entries between a number of smaller
4
//! logical hash tables, or segments. Each segment is entirely independent from
5
//! the others, and entries are never relocated across segment boundaries.
6
//!
7
//! In the context of this crate, a segment refers specifically to an array of
8
//! bucket pointers. The number of segments in a hash table is rounded up to the
9
//! nearest power of two; this is so that selecting the segment for a key is no
10
//! more than a right shift to select the most significant bits of a hashed key.
11
//!
12
//! Each segment is entirely independent from the others; all operations can be
13
//! performed concurrently by multiple threads. Should a set of threads be
14
//! operating on disjoint sets of segments, the only synchronization between
15
//! them will be destructive interference as they access and update the bucket
16
//! array pointer and length for each segment.
17
//!
18
//! Compared to the unsegmented hash tables in this crate, the segmented hash
19
//! tables have higher concurrent write throughput for disjoint sets of keys.
20
//! However, the segmented hash tables have slightly lower read and
21
//! single-threaded write throughput. This is because the segmenting structure
22
//! adds another layer of indirection between the hash table and its buckets.
23
//!
24
//! The idea for segmenting hash tables was inspired by the
25
//! [`ConcurrentHashMap`] from OpenJDK 7, which consists of a number of
26
//! separately-locked segments. OpenJDK 8 introduced a striped concurrent hash
27
//! map that stripes a set of bucket locks across the set of buckets using the
28
//! least significant bits of hashed keys.
29
//!
30
//! [`ConcurrentHashMap`]: https://github.com/openjdk-mirror/jdk7u-jdk/blob/master/src/share/classes/java/util/concurrent/ConcurrentHashMap.java
31
32
use crate::cht::map::{
33
    bucket::{self, BucketArray},
34
    bucket_array_ref::BucketArrayRef,
35
    DefaultHashBuilder,
36
};
37
38
use super::iter::{Iter, ScanningGet};
39
40
use std::{
41
    borrow::Borrow,
42
    hash::{BuildHasher, Hash},
43
    ptr,
44
    sync::atomic::{self, AtomicUsize, Ordering},
45
};
46
47
use crossbeam_epoch::Atomic;
48
49
/// A lock-free hash map implemented with segmented bucket pointer arrays, open
50
/// addressing, and linear probing.
51
///
52
/// By default, `Cache` uses a hashing algorithm selected to provide resistance
53
/// against HashDoS attacks.
54
///
55
/// The default hashing algorithm is the one used by `std::collections::HashMap`,
56
/// which is currently SipHash 1-3.
57
///
58
/// While its performance is very competitive for medium sized keys, other hashing
59
/// algorithms will outperform it for small keys such as integers as well as large
60
/// keys such as long strings. However those algorithms will typically not protect
61
/// against attacks such as HashDoS.
62
///
63
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
64
/// [`default`], [`with_hasher`], [`with_capacity_and_hasher`],
65
/// [`with_num_segments_and_hasher`], and
66
/// [`with_num_segments_capacity_and_hasher`] methods. Many alternative
67
/// algorithms are available on crates.io, such as the [`AHash`] crate.
68
///
69
/// The number of segments can be specified on a per-`HashMap` basis using the
70
/// [`with_num_segments`], [`with_num_segments_and_capacity`],
71
/// [`with_num_segments_and_hasher`], and
72
/// [`with_num_segments_capacity_and_hasher`] methods. By default, the
73
/// `num-cpus` feature is enabled and [`new`], [`with_capacity`],
74
/// [`with_hasher`], and [`with_capacity_and_hasher`] will create maps with
75
/// twice as many segments as the system has CPUs.
76
///
77
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits,
78
/// although this can frequently be achieved by using
79
/// `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself, it is
80
/// important that the following property holds:
81
///
82
/// ```text
83
/// k1 == k2 -> hash(k1) == hash(k2)
84
/// ```
85
///
86
/// In other words, if two keys are equal, their hashes must be equal.
87
///
88
/// It is a logic error for a key to be modified in such a way that the key's
89
/// hash, as determined by the [`Hash`] trait, or its equality, as determined by
90
/// the [`Eq`] trait, changes while it is in the map. This is normally only
91
/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
92
///
93
/// [`AHash`]: https://crates.io/crates/ahash
94
/// [`default`]: #method.default
95
/// [`with_hasher`]: #method.with_hasher
96
/// [`with_capacity`]: #method.with_capacity
97
/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher
98
/// [`with_num_segments_and_hasher`]: #method.with_num_segments_and_hasher
99
/// [`with_num_segments_capacity_and_hasher`]: #method.with_num_segments_capacity_and_hasher
100
/// [`with_num_segments`]: #method.with_num_segments
101
/// [`with_num_segments_and_capacity`]: #method.with_num_segments_and_capacity
102
/// [`new`]: #method.new
103
/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
104
/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
105
/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html
106
/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html
107
pub(crate) struct HashMap<K, V, S = DefaultHashBuilder> {
108
    segments: Box<[Segment<K, V>]>,
109
    build_hasher: S,
110
    len: AtomicUsize,
111
    segment_shift: u32,
112
}
113
114
#[cfg(test)]
115
impl<K, V> HashMap<K, V, DefaultHashBuilder> {
116
    /// Creates an empty `HashMap` with the specified capacity.
117
    ///
118
    /// The hash map will be able to hold at least `capacity` elements without
119
    /// reallocating any bucket pointer arrays. If `capacity` is 0, the hash map
120
    /// will not allocate any bucket pointer arrays. However, it will always
121
    /// allocate memory for segment pointers and lengths.
122
    ///
123
    /// The `HashMap` will be created with at least twice as many segments as
124
    /// the system has CPUs.
125
    pub fn with_capacity(capacity: usize) -> Self {
126
        Self::with_num_segments_capacity_and_hasher(
127
            default_num_segments(),
128
            capacity,
129
            DefaultHashBuilder::default(),
130
        )
131
    }
132
}
133
134
impl<K, V, S> HashMap<K, V, S> {
135
    /// Creates an empty `HashMap` with the specified number of segments, using
136
    /// `build_hasher` to hash the keys.
137
    ///
138
    /// The hash map is initially created with a capacity of 0, so it will not
139
    /// allocate bucket pointer arrays until it is first inserted into. However,
140
    /// it will always allocate memory for segment pointers and lengths.
141
    ///
142
    /// # Panics
143
    ///
144
    /// Panics if `num_segments` is 0.
145
0
    pub(crate) fn with_num_segments_and_hasher(num_segments: usize, build_hasher: S) -> Self {
146
0
        Self::with_num_segments_capacity_and_hasher(num_segments, 0, build_hasher)
147
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::with_num_segments_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<(alloc::sync::Arc<hickory_proto::op::query::Query>, core::any::TypeId), moka::common::concurrent::arc::MiniArc<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::sync::value_initializer::WaiterValue<hickory_resolver::dns_lru::LruValue>>>>>::with_num_segments_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::with_num_segments_and_hasher
148
149
    /// Creates an empty `HashMap` with the specified number of segments and
150
    /// capacity, using `build_hasher` to hash the keys.
151
    ///
152
    /// The hash map will be able to hold at least `capacity` elements without
153
    /// reallocating any bucket pointer arrays. If `capacity` is 0, the hash map
154
    /// will not allocate any bucket pointer arrays. However, it will always
155
    /// allocate memory for segment pointers and lengths.
156
    ///
157
    /// # Panics
158
    ///
159
    /// Panics if `num_segments` is 0.
160
0
    pub(crate) fn with_num_segments_capacity_and_hasher(
161
0
        num_segments: usize,
162
0
        capacity: usize,
163
0
        build_hasher: S,
164
0
    ) -> Self {
165
0
        assert!(num_segments > 0);
166
167
0
        let actual_num_segments = num_segments.next_power_of_two();
168
0
        let segment_shift = 64 - actual_num_segments.trailing_zeros();
169
170
0
        let mut segments = Vec::with_capacity(actual_num_segments);
171
172
0
        if capacity == 0 {
173
0
            unsafe {
174
0
                ptr::write_bytes(segments.as_mut_ptr(), 0, actual_num_segments);
175
0
                segments.set_len(actual_num_segments);
176
0
            }
177
        } else {
178
0
            let actual_capacity = (capacity * 2 / actual_num_segments).next_power_of_two();
179
180
0
            for _ in 0..actual_num_segments {
181
0
                segments.push(Segment {
182
0
                    bucket_array: Atomic::new(BucketArray::with_length(0, actual_capacity)),
183
0
                    len: AtomicUsize::new(0),
184
0
                });
185
0
            }
186
        }
187
188
0
        let segments = segments.into_boxed_slice();
189
190
0
        Self {
191
0
            segments,
192
0
            build_hasher,
193
0
            len: AtomicUsize::new(0),
194
0
            segment_shift,
195
0
        }
196
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::with_num_segments_capacity_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::with_num_segments_capacity_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::with_num_segments_capacity_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<(alloc::sync::Arc<hickory_proto::op::query::Query>, core::any::TypeId), moka::common::concurrent::arc::MiniArc<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::sync::value_initializer::WaiterValue<hickory_resolver::dns_lru::LruValue>>>>>::with_num_segments_capacity_and_hasher
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::with_num_segments_capacity_and_hasher
197
198
0
    pub(crate) fn actual_num_segments(&self) -> usize {
199
0
        self.segments.len()
200
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::actual_num_segments
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::actual_num_segments
201
202
    /// Returns the number of elements in the map.
203
    ///
204
    /// # Safety
205
    ///
206
    /// This method on its own is safe, but other threads can add or remove
207
    /// elements at any time.
208
0
    pub(crate) fn len(&self) -> usize {
209
0
        self.len.load(Ordering::Relaxed)
210
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::len
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::len
211
212
    /// Returns `true` if the map contains no elements.
213
    ///
214
    /// # Safety
215
    ///
216
    /// This method on its own is safe, but other threads can add or remove
217
    /// elements at any time.
218
0
    pub(crate) fn is_empty(&self) -> bool {
219
0
        self.len() == 0
220
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::is_empty
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::is_empty
221
222
    /// Returns the number of elements the map can hold without reallocating any
223
    /// bucket pointer arrays.
224
    ///
225
    /// Note that all mutating operations except removal will result in a bucket
226
    /// being allocated or reallocated.
227
    ///
228
    /// # Safety
229
    ///
230
    /// This method on its own is safe, but other threads can increase the
231
    /// capacity of each segment at any time by adding elements.
232
    #[cfg(any(test, feature = "unstable-debug-counters"))]
233
    pub(crate) fn capacity(&self) -> usize {
234
        let guard = &crossbeam_epoch::pin();
235
236
        self.segments
237
            .iter()
238
            .map(|s| s.bucket_array.load_consume(guard))
239
            .map(|p| unsafe { p.as_ref() })
240
            .map(|a| a.map_or(0, BucketArray::capacity))
241
            .sum::<usize>()
242
    }
243
244
    #[cfg(test)]
245
    /// Returns the number of segments in the map.
246
    pub(crate) fn num_segments(&self) -> usize {
247
        self.segments.len()
248
    }
249
}
250
251
impl<K: Hash + Eq, V, S: BuildHasher> HashMap<K, V, S> {
252
    #[inline]
253
0
    pub(crate) fn contains_key(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> bool {
254
0
        self.get_key_value_and_then(hash, eq, |_, _| Some(()))
255
0
            .is_some()
256
0
    }
257
258
    /// Returns a clone of the value corresponding to the key.
259
    #[inline]
260
0
    pub(crate) fn get(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option<V>
261
0
    where
262
0
        V: Clone,
263
    {
264
0
        self.get_key_value_and(hash, eq, |_, v| v.clone())
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::get::<_>::{closure#0}
265
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::get::<_>
266
267
    /// Returns the result of invoking a function with a reference to the
268
    /// key-value pair corresponding to the supplied key.
269
    #[inline]
270
0
    pub(crate) fn get_key_value_and<T>(
271
0
        &self,
272
0
        hash: u64,
273
0
        eq: impl FnMut(&K) -> bool,
274
0
        with_entry: impl FnOnce(&K, &V) -> T,
275
0
    ) -> Option<T> {
276
0
        self.get_key_value_and_then(hash, eq, |k, v| Some(with_entry(k, v)))
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::get_key_value_and::<_, _, _>::{closure#0}
277
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::get_key_value_and::<_, _, _>
278
279
    /// Returns the result of invoking a function with a reference to the
280
    /// key-value pair corresponding to the supplied key.
281
    #[inline]
282
0
    pub(crate) fn get_key_value_and_then<T>(
283
0
        &self,
284
0
        hash: u64,
285
0
        eq: impl FnMut(&K) -> bool,
286
0
        with_entry: impl FnOnce(&K, &V) -> Option<T>,
287
0
    ) -> Option<T> {
288
0
        self.bucket_array_ref(hash)
289
0
            .get_key_value_and_then(hash, eq, with_entry)
290
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and_then::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::apply::{closure#0}>::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and_then::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_ao::{closure#0}>::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and_then::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::skip_updated_entry_wo::{closure#0}>::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and_then::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::admit::{closure#0}>::{closure#0}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::get_key_value_and_then::<(core::option::Option<alloc::sync::Arc<hickory_proto::op::query::Query>>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>), <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::get_key_value_and_then<hickory_proto::op::query::Query, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_get_with_hash<hickory_proto::op::query::Query, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::get_with_hash<hickory_proto::op::query::Query>::{closure#0}, for<'a> fn(&'a hickory_resolver::dns_lru::LruValue) -> bool>::{closure#0}, (core::option::Option<alloc::sync::Arc<hickory_proto::op::query::Query>>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>)>::{closure#0}, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_get_with_hash<hickory_proto::op::query::Query, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::get_with_hash<hickory_proto::op::query::Query>::{closure#0}, for<'a> fn(&'a hickory_resolver::dns_lru::LruValue) -> bool>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::get_key_value_and_then::<moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::scanning_get::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::scanning_get::{closure#1}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::get_key_value_and_then::<_, _, _>
291
292
    /// Inserts a key-value pair into the map, returning the result of invoking
293
    /// a function with a reference to the key-value pair previously
294
    /// corresponding to the supplied key.
295
    ///
296
    /// If the map did have this key present, both the key and value are
297
    /// updated.
298
    #[inline]
299
0
    pub fn insert_entry_and<T>(
300
0
        &self,
301
0
        key: K,
302
0
        hash: u64,
303
0
        value: V,
304
0
        with_previous_entry: impl FnOnce(&K, &V) -> T,
305
0
    ) -> Option<T>
306
0
    where
307
0
        V: Clone,
308
    {
309
0
        let result = self
310
0
            .bucket_array_ref(hash)
311
            // .insert_entry_and(key, hash, value, with_previous_entry);
312
0
            .insert_with_or_modify_entry_and(
313
0
                key,
314
0
                hash,
315
                || value,
316
0
                |_k, v| v.clone(),
317
0
                with_previous_entry,
318
            );
319
320
0
        if result.is_none() {
321
0
            self.len.fetch_add(1, Ordering::Relaxed);
322
0
        }
323
324
0
        result
325
0
    }
326
327
    /// Removes a key from the map, returning a clone of the value previously
328
    /// corresponding to the key.
329
    #[inline]
330
0
    pub(crate) fn remove(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option<V>
331
0
    where
332
0
        V: Clone,
333
    {
334
0
        self.remove_entry_if_and(hash, eq, |_, _| true, |_, v| v.clone())
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove::<_>::{closure#1}
335
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove::<_>
336
337
    /// Removes a key from the map, returning a clone of the key-value pair
338
    /// previously corresponding to the key.
339
    #[inline]
340
0
    pub(crate) fn remove_entry(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option<(K, V)>
341
0
    where
342
0
        K: Clone,
343
0
        V: Clone,
344
    {
345
0
        self.remove_entry_if_and(hash, eq, |_, _| true, |k, v| (k.clone(), v.clone()))
346
0
    }
347
348
    /// Removes a key from the map if a condition is met, returning a clone of
349
    /// the value previously corresponding to the key.
350
    ///
351
    /// `condition` will be invoked at least once if [`Some`] is returned. It
352
    /// may also be invoked one or more times if [`None`] is returned.
353
    ///
354
    /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
355
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
356
0
    pub(crate) fn remove_if(
357
0
        &self,
358
0
        hash: u64,
359
0
        eq: impl FnMut(&K) -> bool,
360
0
        condition: impl FnMut(&K, &V) -> bool,
361
0
    ) -> Option<V>
362
0
    where
363
0
        V: Clone,
364
    {
365
0
        self.remove_entry_if_and(hash, eq, condition, move |_, v| v.clone())
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_if::<<moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove_if::<_, _>::{closure#0}
366
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if::<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_if::<<moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove_if::<_, _>
367
368
    /// Removes a key from the map if a condition is met, returning the result
369
    /// of invoking a function with a reference to the key-value pair previously
370
    /// corresponding to the key.
371
    ///
372
    /// `condition` will be invoked at least once if [`Some`] is returned. It
373
    /// may also be invoked one or more times if [`None`] is returned.
374
    ///
375
    /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
376
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
377
    #[inline]
378
0
    pub(crate) fn remove_entry_if_and<T>(
379
0
        &self,
380
0
        hash: u64,
381
0
        eq: impl FnMut(&K) -> bool,
382
0
        condition: impl FnMut(&K, &V) -> bool,
383
0
        with_previous_entry: impl FnOnce(&K, &V) -> T,
384
0
    ) -> Option<T> {
385
0
        self.bucket_array_ref(hash)
386
0
            .remove_entry_if_and(hash, eq, condition, move |k, v| {
387
0
                self.len.fetch_sub(1, Ordering::Relaxed);
388
389
0
                with_previous_entry(k, v)
390
0
            })
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<(alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>), <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#4}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#5}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#6}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_if<<moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}>::{closure#0}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove_entry_if_and::<moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>::{closure#1}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove_entry_if_and::<moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>::{closure#1}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove_entry_if_and::<_, _, _, _>::{closure#0}
391
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#1}, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::invalidate::{closure#2}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#8}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#9}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#2}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_lru_entries::{closure#3}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_ao::{closure#3}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#2}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_expired_wo::{closure#3}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_if<<moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#1}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::evict_expired_entries_using_timers::{closure#2}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::remove_entry_if_and::<(alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>), <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#4}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#5}, <moka::sync_base::base_cache::Inner<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::handle_upsert::{closure#6}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_entry_if_and::<moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::remove_if<<moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#0}, <moka::sync_base::key_lock::KeyLock<hickory_proto::op::query::Query, std::hash::random::RandomState> as core::ops::drop::Drop>::drop::{closure#1}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove_entry_if_and::<moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates_registered_before::{closure#2}>::{closure#1}>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove_entry_if_and::<moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>, <moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>::{closure#0}, <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::remove<<moka::sync_base::invalidator::Invalidator<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue, std::hash::random::RandomState>>::remove_predicates::{closure#0}>::{closure#1}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::remove_entry_if_and::<_, _, _, _>
392
393
    /// If no value corresponds to the key, invoke a default function to insert
394
    /// a new key-value pair into the map. Otherwise, modify the existing value
395
    /// and return a clone of the value previously corresponding to the key.
396
    ///
397
    /// `on_insert` may be invoked, even if [`None`] is returned.
398
    ///
399
    /// `on_modify` will be invoked at least once if [`Some`] is returned. It
400
    /// may also be invoked one or more times if [`None`] is returned.
401
    ///
402
    /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
403
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
404
    #[inline]
405
0
    pub(crate) fn insert_with_or_modify(
406
0
        &self,
407
0
        key: K,
408
0
        hash: u64,
409
0
        on_insert: impl FnOnce() -> V,
410
0
        on_modify: impl FnMut(&K, &V) -> V,
411
0
    ) -> Option<V>
412
0
    where
413
0
        V: Clone,
414
    {
415
0
        self.insert_with_or_modify_entry_and(key, hash, on_insert, on_modify, |_, v| v.clone())
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::insert_with_or_modify::<<moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#1}, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#2}>::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::insert_with_or_modify::<_, _>::{closure#0}
416
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::insert_with_or_modify::<<moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#1}, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#2}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::insert_with_or_modify::<_, _>
417
418
    /// If no value corresponds to the key, invoke a default function to insert
419
    /// a new key-value pair into the map. Otherwise, modify the existing value
420
    /// and return the result of invoking a function with a reference to the
421
    /// key-value pair previously corresponding to the supplied key.
422
    ///
423
    /// `on_insert` may be invoked, even if [`None`] is returned.
424
    ///
425
    /// `on_modify` will be invoked at least once if [`Some`] is returned. It
426
    /// may also be invoked one or more times if [`None`] is returned.
427
    ///
428
    /// [`Some`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.Some
429
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
430
    #[inline]
431
0
    pub(crate) fn insert_with_or_modify_entry_and<T>(
432
0
        &self,
433
0
        key: K,
434
0
        hash: u64,
435
0
        on_insert: impl FnOnce() -> V,
436
0
        on_modify: impl FnMut(&K, &V) -> V,
437
0
        with_old_entry: impl FnOnce(&K, &V) -> T,
438
0
    ) -> Option<T> {
439
0
        let result = self.bucket_array_ref(hash).insert_with_or_modify_entry_and(
440
0
            key,
441
0
            hash,
442
0
            on_insert,
443
0
            on_modify,
444
0
            with_old_entry,
445
        );
446
447
0
        if result.is_none() {
448
0
            self.len.fetch_add(1, Ordering::Relaxed);
449
0
        }
450
451
0
        result
452
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::insert_with_or_modify_entry_and::<moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#1}, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#2}, <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::insert_with_or_modify<<moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#1}, <moka::sync_base::base_cache::BaseCache<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>::do_insert_with_hash::{closure#2}>::{closure#0}>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::insert_with_or_modify_entry_and::<_, _, _, _>
453
454
    #[inline]
455
0
    pub(crate) fn insert_if_not_present(&self, key: K, hash: u64, value: V) -> Option<V>
456
0
    where
457
0
        V: Clone,
458
    {
459
0
        let result = self.bucket_array_ref(hash).insert_if_not_present_and(
460
0
            key,
461
0
            hash,
462
            || value,
463
0
            |_, v| v.clone(),
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::insert_if_not_present::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::insert_if_not_present::{closure#1}
464
        );
465
466
0
        if result.is_none() {
467
0
            self.len.fetch_add(1, Ordering::Relaxed);
468
0
        }
469
470
0
        result
471
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::insert_if_not_present
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::insert_if_not_present
472
473
0
    pub(crate) fn keys<T>(&self, segment: usize, with_key: impl FnMut(&K) -> T) -> Option<Vec<T>> {
474
0
        if segment >= self.segments.len() {
475
0
            return None;
476
0
        }
477
478
        let Segment {
479
0
            ref bucket_array,
480
0
            ref len,
481
0
        } = self.segments[segment];
482
483
0
        let bucket_array_ref = BucketArrayRef {
484
0
            bucket_array,
485
0
            build_hasher: &self.build_hasher,
486
0
            len,
487
0
        };
488
489
0
        Some(bucket_array_ref.keys(with_key))
490
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::keys::<alloc::string::String, <alloc::string::String as core::clone::Clone>::clone>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::keys::<_, _>
491
492
0
    pub(crate) fn iter(&self) -> Iter<'_, K, V>
493
0
    where
494
0
        K: Clone,
495
0
        V: Clone,
496
    {
497
0
        Iter::with_single_cache_segment(self, self.actual_num_segments())
498
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::iter
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::iter
499
500
    #[inline]
501
0
    pub(crate) fn hash<Q>(&self, key: &Q) -> u64
502
0
    where
503
0
        Q: Hash + Eq + ?Sized,
504
0
        K: Borrow<Q>,
505
    {
506
0
        bucket::hash(&self.build_hasher, key)
507
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::hash::<alloc::sync::Arc<hickory_proto::op::query::Query>>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::hash::<alloc::string::String>
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::hash::<str>
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::hash::<_>
508
}
509
510
impl<K, V, S> ScanningGet<K, V> for HashMap<K, V, S>
511
where
512
    K: Hash + Eq + Clone,
513
    V: Clone,
514
    S: BuildHasher,
515
{
516
0
    fn scanning_get(&self, key: &K) -> Option<V> {
517
0
        let hash = self.hash(key);
518
0
        self.get_key_value_and_then(hash, |k| k == key, |_k, v| Some(v.clone()))
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::scanning_get::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as moka::cht::iter::ScanningGet<_, _>>::scanning_get::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::scanning_get::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as moka::cht::iter::ScanningGet<_, _>>::scanning_get::{closure#1}
519
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::scanning_get
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as moka::cht::iter::ScanningGet<_, _>>::scanning_get
520
521
0
    fn keys(&self, cht_segment: usize) -> Option<Vec<K>> {
522
0
        self.keys(cht_segment, Clone::clone)
523
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as moka::cht::iter::ScanningGet<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::keys
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as moka::cht::iter::ScanningGet<_, _>>::keys
524
}
525
526
impl<K, V, S> Drop for HashMap<K, V, S> {
527
0
    fn drop(&mut self) {
528
        // Important: Since we are using a dummy guard returned by `unprotected`,
529
        // those `defer_*` functions will be executed immediately.
530
0
        let guard = unsafe { &crossbeam_epoch::unprotected() };
531
0
        atomic::fence(Ordering::Acquire);
532
533
        for Segment {
534
0
            bucket_array: this_bucket_array,
535
            ..
536
0
        } in self.segments.iter()
537
        {
538
0
            let mut current_ptr = this_bucket_array.load(Ordering::Relaxed, guard);
539
540
0
            while let Some(current_ref) = unsafe { current_ptr.as_ref() } {
541
0
                let next_ptr = current_ref.next.load(Ordering::Relaxed, guard);
542
543
0
                for this_bucket_ptr in current_ref
544
0
                    .buckets
545
0
                    .iter()
546
0
                    .map(|b| b.load(Ordering::Relaxed, guard))
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>> as core::ops::drop::Drop>::drop::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>> as core::ops::drop::Drop>::drop::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as core::ops::drop::Drop>::drop::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<(alloc::sync::Arc<hickory_proto::op::query::Query>, core::any::TypeId), moka::common::concurrent::arc::MiniArc<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::sync::value_initializer::WaiterValue<hickory_resolver::dns_lru::LruValue>>>> as core::ops::drop::Drop>::drop::{closure#0}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as core::ops::drop::Drop>::drop::{closure#0}
547
0
                    .filter(|p| !p.is_null())
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>> as core::ops::drop::Drop>::drop::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>> as core::ops::drop::Drop>::drop::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as core::ops::drop::Drop>::drop::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<(alloc::sync::Arc<hickory_proto::op::query::Query>, core::any::TypeId), moka::common::concurrent::arc::MiniArc<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::sync::value_initializer::WaiterValue<hickory_resolver::dns_lru::LruValue>>>> as core::ops::drop::Drop>::drop::{closure#1}
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as core::ops::drop::Drop>::drop::{closure#1}
548
                {
549
0
                    if bucket::is_tombstone(this_bucket_ptr) {
550
                        // Only delete tombstones from the newest bucket array.
551
                        // The only way this becomes a memory leak is if there was a
552
                        // panic during a rehash, in which case we are going to say
553
                        // that running destructors and freeing memory is
554
                        // best-effort, and our best effort is to not do it
555
0
                        if next_ptr.is_null() {
556
0
                            // Since this bucket is a tombstone, its value should have
557
0
                            // been dropped already. So, here, we only drop the key.
558
0
                            unsafe { bucket::defer_acquire_destroy(guard, this_bucket_ptr) };
559
0
                        }
560
0
                    } else {
561
0
                        // This bucket is live. Drop its key and value. (Fixes #176)
562
0
                        unsafe { bucket::defer_destroy_bucket(guard, this_bucket_ptr) };
563
0
                    }
564
                }
565
566
0
                unsafe { bucket::defer_acquire_destroy(guard, current_ptr) };
567
568
0
                current_ptr = next_ptr;
569
            }
570
        }
571
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <moka::cht::segment::HashMap<(alloc::sync::Arc<hickory_proto::op::query::Query>, core::any::TypeId), moka::common::concurrent::arc::MiniArc<lock_api::rwlock::RwLock<parking_lot::raw_rwlock::RawRwLock, moka::sync::value_initializer::WaiterValue<hickory_resolver::dns_lru::LruValue>>>> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _> as core::ops::drop::Drop>::drop
572
}
573
574
impl<K, V, S> HashMap<K, V, S> {
575
    #[inline]
576
0
    fn bucket_array_ref(&'_ self, hash: u64) -> BucketArrayRef<'_, K, V, S> {
577
0
        let index = self.segment_index_from_hash(hash);
578
579
        let Segment {
580
0
            ref bucket_array,
581
0
            ref len,
582
0
        } = self.segments[index];
583
584
0
        BucketArrayRef {
585
0
            bucket_array,
586
0
            build_hasher: &self.build_hasher,
587
0
            len,
588
0
        }
589
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::bucket_array_ref
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::bucket_array_ref
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::bucket_array_ref
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::bucket_array_ref
590
591
    #[inline]
592
0
    fn segment_index_from_hash(&'_ self, hash: u64) -> usize {
593
0
        if self.segment_shift == 64 {
594
0
            0
595
        } else {
596
0
            (hash >> self.segment_shift) as usize
597
        }
598
0
    }
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<moka::common::concurrent::ValueEntry<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>>::segment_index_from_hash
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::sync::Arc<hickory_proto::op::query::Query>, moka::common::concurrent::arc::MiniArc<lock_api::mutex::Mutex<parking_lot::raw_mutex::RawMutex, ()>>>>::segment_index_from_hash
Unexecuted instantiation: <moka::cht::segment::HashMap<alloc::string::String, moka::sync_base::invalidator::Predicate<hickory_proto::op::query::Query, hickory_resolver::dns_lru::LruValue>>>::segment_index_from_hash
Unexecuted instantiation: <moka::cht::segment::HashMap<_, _, _>>::segment_index_from_hash
599
}
600
601
struct Segment<K, V> {
602
    bucket_array: Atomic<BucketArray<K, V>>,
603
    len: AtomicUsize,
604
}
605
606
#[cfg(test)]
607
fn default_num_segments() -> usize {
608
    crate::common::available_parallelism() * 2
609
}
610
611
#[cfg(test)]
612
mod tests {
613
    use std::{
614
        collections::BTreeMap,
615
        sync::{Arc, Barrier},
616
        thread::{spawn, JoinHandle},
617
    };
618
619
    use super::*;
620
    use crate::cht::test_util::{run_deferred, DropNotifier, NoisyDropper};
621
622
    #[test]
623
    fn single_segment() {
624
        let map =
625
            HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default());
626
627
        assert!(map.is_empty());
628
        assert_eq!(map.len(), 0);
629
630
        let key = "key1";
631
        let hash = map.hash(key);
632
633
        assert_eq!(map.insert_entry_and(key, hash, 5, |_, v| *v), None);
634
        assert_eq!(map.get(hash, |k| k == &key), Some(5));
635
636
        assert!(!map.is_empty());
637
        assert_eq!(map.len(), 1);
638
639
        assert_eq!(map.remove(hash, |k| k == &key), Some(5));
640
        assert!(map.is_empty());
641
        assert_eq!(map.len(), 0);
642
643
        run_deferred();
644
    }
645
646
    #[test]
647
    fn insert_if_not_present() {
648
        let map =
649
            HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default());
650
651
        let key = "key1";
652
        let hash = map.hash(key);
653
654
        assert_eq!(map.insert_if_not_present(key, hash, 5), None);
655
        assert_eq!(map.get(hash, |k| k == &key), Some(5));
656
657
        assert_eq!(map.insert_if_not_present(key, hash, 6), Some(5));
658
        assert_eq!(map.get(hash, |k| k == &key), Some(5));
659
660
        assert_eq!(map.remove(hash, |k| k == &key), Some(5));
661
662
        assert_eq!(map.insert_if_not_present(key, hash, 7), None);
663
        assert_eq!(map.get(hash, |k| k == &key), Some(7));
664
665
        assert_eq!(map.remove(hash, |k| k == &key), Some(7));
666
        assert!(map.is_empty());
667
        assert_eq!(map.len(), 0);
668
669
        run_deferred();
670
    }
671
672
    #[cfg_attr(mips, ignore)]
673
    #[test]
674
    fn concurrent_insert_if_not_present() {
675
        const NUM_THREADS: usize = 64;
676
        const MAX_VALUE: usize = 512;
677
678
        let hashmap = Arc::new(HashMap::with_capacity(0));
679
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
680
681
        #[allow(clippy::needless_collect)]
682
        let threads: Vec<_> = (0..NUM_THREADS)
683
            .map(|thread_id| {
684
                let hashmap = Arc::clone(&hashmap);
685
                let barrier = Arc::clone(&barrier);
686
687
                spawn(move || {
688
                    barrier.wait();
689
                    let mut success_count = 0usize;
690
691
                    for key in 0..MAX_VALUE {
692
                        let hash = hashmap.hash(&key);
693
                        let result = hashmap.insert_if_not_present(key, hash, thread_id);
694
                        if result.is_none() {
695
                            success_count += 1;
696
                        }
697
                    }
698
699
                    (thread_id, success_count)
700
                })
701
            })
702
            .collect();
703
704
        // Collect the results from the threads and insert into a BTreeMap with
705
        // thread_id as key and success_count as value.
706
        let results1 = threads
707
            .into_iter()
708
            .map(JoinHandle::join)
709
            .collect::<Result<BTreeMap<_, _>, _>>()
710
            .expect("Got an error from a thread");
711
712
        assert_eq!(hashmap.len(), MAX_VALUE);
713
714
        // Verify that the sum of successful insertion counts is MAX_VALUE.
715
        let sum_of_insertions: usize = results1.values().sum();
716
        assert_eq!(sum_of_insertions, MAX_VALUE);
717
718
        // Get all entries from the cht HashMap and turn them into the same format
719
        // (BTreeMap) as results1.
720
721
        // Initialize results2.
722
        let mut results2 = (0..NUM_THREADS)
723
            .map(|thread_id| (thread_id, 0usize))
724
            .collect::<BTreeMap<_, _>>();
725
726
        // Get all entries from the cht HashMap.
727
        for key in 0..MAX_VALUE {
728
            let hash = hashmap.hash(&key);
729
            if let Some(thread_id) = hashmap.get(hash, |&k| k == key) {
730
                let count = results2.get_mut(&thread_id).unwrap();
731
                *count += 1;
732
            }
733
        }
734
735
        // Verify that they are the same.
736
        assert_eq!(results1, results2);
737
738
        run_deferred();
739
    }
740
741
    #[test]
742
    fn insertion() {
743
        const MAX_VALUE: i32 = 512;
744
745
        let map = HashMap::with_capacity(MAX_VALUE as usize);
746
747
        for i in 0..MAX_VALUE {
748
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
749
750
            assert!(!map.is_empty());
751
            assert_eq!(map.len(), (i + 1) as usize);
752
753
            for j in 0..=i {
754
                let hash = map.hash(&j);
755
                assert_eq!(map.get(hash, |&k| k == j), Some(j));
756
                assert_eq!(map.insert_entry_and(j, hash, j, |_, v| *v), Some(j));
757
            }
758
759
            for l in i + 1..MAX_VALUE {
760
                assert_eq!(map.get(map.hash(&l), |&k| k == l), None);
761
            }
762
        }
763
764
        run_deferred();
765
    }
766
767
    #[test]
768
    fn growth() {
769
        const MAX_VALUE: i32 = 512;
770
771
        let map = HashMap::with_capacity(0);
772
773
        for i in 0..MAX_VALUE {
774
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
775
776
            assert!(!map.is_empty());
777
            assert_eq!(map.len(), (i + 1) as usize);
778
779
            for j in 0..=i {
780
                let hash = map.hash(&j);
781
                assert_eq!(map.get(hash, |&k| k == j), Some(j));
782
                assert_eq!(map.insert_entry_and(j, hash, j, |_, v| *v), Some(j));
783
            }
784
785
            for l in i + 1..MAX_VALUE {
786
                assert_eq!(map.get(map.hash(&l), |&k| k == l), None);
787
            }
788
        }
789
790
        run_deferred();
791
    }
792
793
    // Ignore this test and some other tests on 32-bit mips targets to avoid the following
794
    // error on QEMU user space emulator:
795
    //
796
    //     memory allocation of 1052 bytes failed
797
    //     process didn't exit successfully: ... (signal: 6, SIGABRT: process abort signal)
798
    #[cfg_attr(mips, ignore)]
799
    #[test]
800
    fn concurrent_insertion() {
801
        const MAX_VALUE: i32 = 512;
802
        const NUM_THREADS: usize = 64;
803
        const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE;
804
805
        let map = Arc::new(HashMap::with_capacity(MAX_INSERTED_VALUE as usize));
806
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
807
808
        #[allow(clippy::needless_collect)]
809
        let threads: Vec<_> = (0..NUM_THREADS)
810
            .map(|i| {
811
                let map = Arc::clone(&map);
812
                let barrier = Arc::clone(&barrier);
813
814
                spawn(move || {
815
                    barrier.wait();
816
817
                    for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) {
818
                        assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None);
819
                    }
820
                })
821
            })
822
            .collect();
823
824
        for result in threads.into_iter().map(JoinHandle::join) {
825
            assert!(result.is_ok());
826
        }
827
828
        assert!(!map.is_empty());
829
        assert_eq!(map.len(), MAX_INSERTED_VALUE as usize);
830
831
        for i in 0..MAX_INSERTED_VALUE {
832
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
833
        }
834
835
        run_deferred();
836
    }
837
838
    #[cfg_attr(mips, ignore)]
839
    #[test]
840
    fn concurrent_growth() {
841
        const MAX_VALUE: i32 = 512;
842
        const NUM_THREADS: usize = 64;
843
        const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE;
844
845
        let map = Arc::new(HashMap::with_capacity(0));
846
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
847
848
        #[allow(clippy::needless_collect)]
849
        let threads: Vec<_> = (0..NUM_THREADS)
850
            .map(|i| {
851
                let map = Arc::clone(&map);
852
                let barrier = Arc::clone(&barrier);
853
854
                spawn(move || {
855
                    barrier.wait();
856
857
                    for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) {
858
                        assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None);
859
                    }
860
                })
861
            })
862
            .collect();
863
864
        for result in threads.into_iter().map(|t| t.join()) {
865
            assert!(result.is_ok());
866
        }
867
868
        assert!(!map.is_empty());
869
        assert_eq!(map.len(), MAX_INSERTED_VALUE as usize);
870
871
        for i in 0..MAX_INSERTED_VALUE {
872
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
873
        }
874
875
        run_deferred();
876
    }
877
878
    #[test]
879
    fn removal() {
880
        const MAX_VALUE: i32 = 512;
881
882
        let map = HashMap::with_capacity(MAX_VALUE as usize);
883
884
        for i in 0..MAX_VALUE {
885
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
886
        }
887
888
        for i in 0..MAX_VALUE {
889
            assert_eq!(map.remove(map.hash(&i), |&k| k == i), Some(i));
890
        }
891
892
        assert!(map.is_empty());
893
        assert_eq!(map.len(), 0);
894
895
        for i in 0..MAX_VALUE {
896
            assert_eq!(map.get(map.hash(&i), |&k| k == i), None);
897
        }
898
899
        run_deferred();
900
    }
901
902
    #[cfg_attr(mips, ignore)]
903
    #[test]
904
    fn concurrent_removal() {
905
        const MAX_VALUE: i32 = 512;
906
        const NUM_THREADS: usize = 64;
907
        const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE;
908
909
        let map = HashMap::with_capacity(MAX_INSERTED_VALUE as usize);
910
911
        for i in 0..MAX_INSERTED_VALUE {
912
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
913
        }
914
915
        let map = Arc::new(map);
916
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
917
918
        #[allow(clippy::needless_collect)]
919
        let threads: Vec<_> = (0..NUM_THREADS)
920
            .map(|i| {
921
                let map = Arc::clone(&map);
922
                let barrier = Arc::clone(&barrier);
923
924
                spawn(move || {
925
                    barrier.wait();
926
927
                    for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) {
928
                        assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j));
929
                    }
930
                })
931
            })
932
            .collect();
933
934
        for result in threads.into_iter().map(|t| t.join()) {
935
            assert!(result.is_ok());
936
        }
937
938
        assert_eq!(map.len(), 0);
939
940
        for i in 0..MAX_INSERTED_VALUE {
941
            assert_eq!(map.get(map.hash(&i), |&k| k == i), None);
942
        }
943
944
        run_deferred();
945
    }
946
947
    #[cfg_attr(mips, ignore)]
948
    #[test]
949
    fn concurrent_insertion_and_removal() {
950
        const MAX_VALUE: i32 = 512;
951
        const NUM_THREADS: usize = 64;
952
        const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE * 2;
953
        const INSERTED_MIDPOINT: i32 = MAX_INSERTED_VALUE / 2;
954
955
        let map = HashMap::with_capacity(MAX_INSERTED_VALUE as usize);
956
957
        for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE {
958
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
959
        }
960
961
        let map = Arc::new(map);
962
        let barrier = Arc::new(Barrier::new(NUM_THREADS * 2));
963
964
        #[allow(clippy::needless_collect)]
965
        let insert_threads: Vec<_> = (0..NUM_THREADS)
966
            .map(|i| {
967
                let map = Arc::clone(&map);
968
                let barrier = Arc::clone(&barrier);
969
970
                spawn(move || {
971
                    barrier.wait();
972
973
                    for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) {
974
                        assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None);
975
                    }
976
                })
977
            })
978
            .collect();
979
980
        #[allow(clippy::needless_collect)]
981
        let remove_threads: Vec<_> = (0..NUM_THREADS)
982
            .map(|i| {
983
                let map = Arc::clone(&map);
984
                let barrier = Arc::clone(&barrier);
985
986
                spawn(move || {
987
                    barrier.wait();
988
989
                    for j in (0..MAX_VALUE).map(|j| INSERTED_MIDPOINT + j + (i as i32 * MAX_VALUE))
990
                    {
991
                        assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j));
992
                    }
993
                })
994
            })
995
            .collect();
996
997
        for result in insert_threads
998
            .into_iter()
999
            .chain(remove_threads)
1000
            .map(|t| t.join())
1001
        {
1002
            assert!(result.is_ok());
1003
        }
1004
1005
        assert!(!map.is_empty());
1006
        assert_eq!(map.len(), INSERTED_MIDPOINT as usize);
1007
1008
        for i in 0..INSERTED_MIDPOINT {
1009
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
1010
        }
1011
1012
        for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE {
1013
            assert_eq!(map.get(map.hash(&i), |&k| k == i), None);
1014
        }
1015
1016
        run_deferred();
1017
    }
1018
1019
    #[cfg_attr(mips, ignore)]
1020
    #[test]
1021
    fn concurrent_growth_and_removal() {
1022
        const MAX_VALUE: i32 = 512;
1023
        const NUM_THREADS: usize = 64;
1024
        const MAX_INSERTED_VALUE: i32 = (NUM_THREADS as i32) * MAX_VALUE * 2;
1025
        const INSERTED_MIDPOINT: i32 = MAX_INSERTED_VALUE / 2;
1026
1027
        let map = HashMap::with_capacity(INSERTED_MIDPOINT as usize);
1028
1029
        for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE {
1030
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
1031
        }
1032
1033
        let map = Arc::new(map);
1034
        let barrier = Arc::new(Barrier::new(NUM_THREADS * 2));
1035
1036
        #[allow(clippy::needless_collect)]
1037
        let insert_threads: Vec<_> = (0..NUM_THREADS)
1038
            .map(|i| {
1039
                let map = Arc::clone(&map);
1040
                let barrier = Arc::clone(&barrier);
1041
1042
                spawn(move || {
1043
                    barrier.wait();
1044
1045
                    for j in (0..MAX_VALUE).map(|j| j + (i as i32 * MAX_VALUE)) {
1046
                        assert_eq!(map.insert_entry_and(j, map.hash(&j), j, |_, v| *v), None);
1047
                    }
1048
                })
1049
            })
1050
            .collect();
1051
1052
        #[allow(clippy::needless_collect)]
1053
        let remove_threads: Vec<_> = (0..NUM_THREADS)
1054
            .map(|i| {
1055
                let map = Arc::clone(&map);
1056
                let barrier = Arc::clone(&barrier);
1057
1058
                spawn(move || {
1059
                    barrier.wait();
1060
1061
                    for j in (0..MAX_VALUE).map(|j| INSERTED_MIDPOINT + j + (i as i32 * MAX_VALUE))
1062
                    {
1063
                        assert_eq!(map.remove(map.hash(&j), |&k| k == j), Some(j));
1064
                    }
1065
                })
1066
            })
1067
            .collect();
1068
1069
        for result in insert_threads
1070
            .into_iter()
1071
            .chain(remove_threads)
1072
            .map(JoinHandle::join)
1073
        {
1074
            assert!(result.is_ok());
1075
        }
1076
1077
        assert!(!map.is_empty());
1078
        assert_eq!(map.len(), INSERTED_MIDPOINT as usize);
1079
1080
        for i in 0..INSERTED_MIDPOINT {
1081
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
1082
        }
1083
1084
        for i in INSERTED_MIDPOINT..MAX_INSERTED_VALUE {
1085
            assert_eq!(map.get(map.hash(&i), |&k| k == i), None);
1086
        }
1087
1088
        run_deferred();
1089
    }
1090
1091
    #[test]
1092
    fn insert_with_or_modify() {
1093
        let map = HashMap::with_capacity(0);
1094
1095
        let key = "key1";
1096
        let hash = map.hash(&key);
1097
1098
        assert_eq!(
1099
            map.insert_with_or_modify(key, hash, || 1, |_, x| x + 1),
1100
            None
1101
        );
1102
        assert_eq!(map.get(hash, |&k| k == key), Some(1));
1103
1104
        assert_eq!(
1105
            map.insert_with_or_modify(key, hash, || 1, |_, x| x + 1),
1106
            Some(1)
1107
        );
1108
        assert_eq!(map.get(hash, |&k| k == key), Some(2));
1109
1110
        run_deferred();
1111
    }
1112
1113
    #[cfg_attr(mips, ignore)]
1114
    #[test]
1115
    fn concurrent_insert_with_or_modify() {
1116
        const NUM_THREADS: usize = 64;
1117
        const MAX_VALUE: i32 = 512;
1118
1119
        let map = Arc::new(HashMap::with_capacity(0));
1120
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1121
1122
        #[allow(clippy::needless_collect)]
1123
        let threads: Vec<_> = (0..NUM_THREADS)
1124
            .map(|_| {
1125
                let map = Arc::clone(&map);
1126
                let barrier = Arc::clone(&barrier);
1127
1128
                spawn(move || {
1129
                    barrier.wait();
1130
1131
                    for j in 0..MAX_VALUE {
1132
                        map.insert_with_or_modify(j, map.hash(&j), || 1, |_, x| x + 1);
1133
                    }
1134
                })
1135
            })
1136
            .collect();
1137
1138
        for result in threads.into_iter().map(JoinHandle::join) {
1139
            assert!(result.is_ok());
1140
        }
1141
1142
        assert_eq!(map.len(), MAX_VALUE as usize);
1143
1144
        for i in 0..MAX_VALUE {
1145
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(NUM_THREADS as i32));
1146
        }
1147
1148
        run_deferred();
1149
    }
1150
1151
    #[cfg_attr(mips, ignore)]
1152
    #[test]
1153
    fn concurrent_overlapped_insertion() {
1154
        const NUM_THREADS: usize = 64;
1155
        const MAX_VALUE: i32 = 512;
1156
1157
        let map = Arc::new(HashMap::with_capacity(MAX_VALUE as usize));
1158
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1159
1160
        #[allow(clippy::needless_collect)]
1161
        let threads: Vec<_> = (0..NUM_THREADS)
1162
            .map(|_| {
1163
                let map = Arc::clone(&map);
1164
                let barrier = Arc::clone(&barrier);
1165
1166
                spawn(move || {
1167
                    barrier.wait();
1168
1169
                    for j in 0..MAX_VALUE {
1170
                        map.insert_entry_and(j, map.hash(&j), j, |_, v| *v);
1171
                    }
1172
                })
1173
            })
1174
            .collect();
1175
1176
        for result in threads.into_iter().map(JoinHandle::join) {
1177
            assert!(result.is_ok());
1178
        }
1179
1180
        assert_eq!(map.len(), MAX_VALUE as usize);
1181
1182
        for i in 0..MAX_VALUE {
1183
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
1184
        }
1185
1186
        run_deferred();
1187
    }
1188
1189
    // Ignore this test on 32-bit mips and armv5te targets to avoid the following
1190
    // error on QEMU user space emulator:
1191
    //
1192
    // (mips)
1193
    //     memory allocation of 1052 bytes failed
1194
    //     process didn't exit successfully: ... (signal: 6, SIGABRT: process abort signal)
1195
    //
1196
    // (armv5te)
1197
    //     process didn't exit successfully: ... (signal: 4, SIGILL: illegal instruction)
1198
    //
1199
    #[cfg_attr(any(armv5te, mips), ignore)]
1200
    #[test]
1201
    fn concurrent_overlapped_growth() {
1202
        const NUM_THREADS: usize = 64;
1203
        const MAX_VALUE: i32 = 512;
1204
1205
        let map = Arc::new(HashMap::with_capacity(1));
1206
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1207
1208
        #[allow(clippy::needless_collect)]
1209
        let threads: Vec<_> = (0..NUM_THREADS)
1210
            .map(|_| {
1211
                let map = Arc::clone(&map);
1212
                let barrier = Arc::clone(&barrier);
1213
1214
                spawn(move || {
1215
                    barrier.wait();
1216
1217
                    for j in 0..MAX_VALUE {
1218
                        map.insert_entry_and(j, map.hash(&j), j, |_, v| *v);
1219
                    }
1220
                })
1221
            })
1222
            .collect();
1223
1224
        for result in threads.into_iter().map(JoinHandle::join) {
1225
            assert!(result.is_ok());
1226
        }
1227
1228
        assert_eq!(map.len(), MAX_VALUE as usize);
1229
1230
        for i in 0..MAX_VALUE {
1231
            assert_eq!(map.get(map.hash(&i), |&k| k == i), Some(i));
1232
        }
1233
1234
        run_deferred();
1235
    }
1236
1237
    #[cfg_attr(mips, ignore)]
1238
    #[test]
1239
    fn concurrent_overlapped_removal() {
1240
        const NUM_THREADS: usize = 64;
1241
        const MAX_VALUE: i32 = 512;
1242
1243
        let map = HashMap::with_capacity(MAX_VALUE as usize);
1244
1245
        for i in 0..MAX_VALUE {
1246
            map.insert_entry_and(i, map.hash(&i), i, |_, v| *v);
1247
        }
1248
1249
        let map = Arc::new(map);
1250
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
1251
1252
        #[allow(clippy::needless_collect)]
1253
        let threads: Vec<_> = (0..NUM_THREADS)
1254
            .map(|_| {
1255
                let map = Arc::clone(&map);
1256
                let barrier = Arc::clone(&barrier);
1257
1258
                spawn(move || {
1259
                    barrier.wait();
1260
1261
                    for j in 0..MAX_VALUE {
1262
                        let prev_value = map.remove(map.hash(&j), |&k| k == j);
1263
1264
                        if let Some(v) = prev_value {
1265
                            assert_eq!(v, j);
1266
                        }
1267
                    }
1268
                })
1269
            })
1270
            .collect();
1271
1272
        for result in threads.into_iter().map(JoinHandle::join) {
1273
            assert!(result.is_ok());
1274
        }
1275
1276
        assert!(map.is_empty());
1277
        assert_eq!(map.len(), 0);
1278
1279
        for i in 0..MAX_VALUE {
1280
            assert_eq!(map.get(map.hash(&i), |&k| k == i), None);
1281
        }
1282
1283
        run_deferred();
1284
    }
1285
1286
    #[test]
1287
    fn drop_value() {
1288
        let key_parent = Arc::new(DropNotifier::new());
1289
        let value_parent = Arc::new(DropNotifier::new());
1290
1291
        {
1292
            let map = HashMap::with_capacity(0);
1293
            let hash = map.hash(&0);
1294
1295
            assert_eq!(
1296
                map.insert_entry_and(
1297
                    NoisyDropper::new(Arc::clone(&key_parent), 0),
1298
                    hash,
1299
                    NoisyDropper::new(Arc::clone(&value_parent), 0),
1300
                    |_, _| ()
1301
                ),
1302
                None
1303
            );
1304
            assert!(!map.is_empty());
1305
            assert_eq!(map.len(), 1);
1306
            map.get_key_value_and(hash, |k| k == &0, |_k, v| assert_eq!(v, &0));
1307
1308
            map.remove_entry_if_and(hash, |k| k == &0, |_, _| true, |_k, v| assert_eq!(v, &0));
1309
            assert!(map.is_empty());
1310
            assert_eq!(map.len(), 0);
1311
            assert_eq!(map.get_key_value_and(hash, |k| k == &0, |_, _| ()), None);
1312
1313
            run_deferred();
1314
1315
            assert!(!key_parent.was_dropped());
1316
            assert!(value_parent.was_dropped());
1317
        }
1318
1319
        run_deferred();
1320
1321
        assert!(key_parent.was_dropped());
1322
        assert!(value_parent.was_dropped());
1323
    }
1324
1325
    #[test]
1326
    fn drop_many_values() {
1327
        const NUM_VALUES: usize = 1 << 16;
1328
1329
        let key_parents: Vec<_> = std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
1330
            .take(NUM_VALUES)
1331
            .collect();
1332
        let value_parents: Vec<_> = std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
1333
            .take(NUM_VALUES)
1334
            .collect();
1335
1336
        {
1337
            let map = HashMap::with_capacity(0);
1338
            assert!(map.is_empty());
1339
            assert_eq!(map.len(), 0);
1340
1341
            for (i, (this_key_parent, this_value_parent)) in
1342
                key_parents.iter().zip(value_parents.iter()).enumerate()
1343
            {
1344
                assert_eq!(
1345
                    map.insert_entry_and(
1346
                        NoisyDropper::new(Arc::clone(this_key_parent), i),
1347
                        map.hash(&i),
1348
                        NoisyDropper::new(Arc::clone(this_value_parent), i),
1349
                        |_, _| ()
1350
                    ),
1351
                    None
1352
                );
1353
1354
                assert!(!map.is_empty());
1355
                assert_eq!(map.len(), i + 1);
1356
            }
1357
1358
            for i in 0..NUM_VALUES {
1359
                assert_eq!(
1360
                    map.get_key_value_and(
1361
                        map.hash(&i),
1362
                        |k| k == &i,
1363
                        |k, v| {
1364
                            assert_eq!(**k, i);
1365
                            assert_eq!(*v, i);
1366
                        }
1367
                    ),
1368
                    Some(())
1369
                );
1370
            }
1371
1372
            for i in 0..NUM_VALUES {
1373
                assert_eq!(
1374
                    map.remove_entry_if_and(
1375
                        map.hash(&i),
1376
                        |k| k == &i,
1377
                        |_, _| true,
1378
                        |k, v| {
1379
                            assert_eq!(**k, i);
1380
                            assert_eq!(*v, i);
1381
                        }
1382
                    ),
1383
                    Some(())
1384
                );
1385
            }
1386
1387
            assert!(map.is_empty());
1388
            assert_eq!(map.len(), 0);
1389
1390
            run_deferred();
1391
1392
            let live_key_count =
1393
                NUM_VALUES - key_parents.iter().filter(|k| k.was_dropped()).count();
1394
            let bucket_array_len = map.capacity() * 2;
1395
            assert_eq!(bucket_array_len, map.num_segments() * 128 * 2);
1396
            assert!(live_key_count <= bucket_array_len / 10);
1397
1398
            for this_value_parent in value_parents.iter() {
1399
                assert!(this_value_parent.was_dropped());
1400
            }
1401
1402
            for i in 0..NUM_VALUES {
1403
                assert_eq!(
1404
                    map.get_key_value_and(map.hash(&i), |k| k == &i, |_, _| ()),
1405
                    None
1406
                );
1407
            }
1408
        } // The map should be dropped here.
1409
1410
        run_deferred();
1411
1412
        for this_key_parent in key_parents.into_iter() {
1413
            assert!(this_key_parent.was_dropped());
1414
        }
1415
1416
        for this_value_parent in value_parents.into_iter() {
1417
            assert!(this_value_parent.was_dropped());
1418
        }
1419
    }
1420
1421
    #[test]
1422
    fn drop_many_values_concurrent() {
1423
        const NUM_THREADS: usize = 64;
1424
        const NUM_VALUES_PER_THREAD: usize = 512;
1425
        const NUM_VALUES: usize = NUM_THREADS * NUM_VALUES_PER_THREAD;
1426
1427
        let key_parents: Arc<Vec<_>> = Arc::new(
1428
            std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
1429
                .take(NUM_VALUES)
1430
                .collect(),
1431
        );
1432
        let value_parents: Arc<Vec<_>> = Arc::new(
1433
            std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
1434
                .take(NUM_VALUES)
1435
                .collect(),
1436
        );
1437
1438
        {
1439
            let map = Arc::new(HashMap::with_capacity(0));
1440
            assert!(map.is_empty());
1441
            assert_eq!(map.len(), 0);
1442
1443
            let barrier = Arc::new(Barrier::new(NUM_THREADS));
1444
1445
            #[allow(clippy::needless_collect)]
1446
            let handles: Vec<_> = (0..NUM_THREADS)
1447
                .map(|i| {
1448
                    let map = Arc::clone(&map);
1449
                    let barrier = Arc::clone(&barrier);
1450
                    let key_parents = Arc::clone(&key_parents);
1451
                    let value_parents = Arc::clone(&value_parents);
1452
1453
                    spawn(move || {
1454
                        barrier.wait();
1455
1456
                        let these_key_parents = &key_parents
1457
                            [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD];
1458
                        let these_value_parents = &value_parents
1459
                            [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD];
1460
1461
                        for (j, (this_key_parent, this_value_parent)) in these_key_parents
1462
                            .iter()
1463
                            .zip(these_value_parents.iter())
1464
                            .enumerate()
1465
                        {
1466
                            let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32;
1467
                            let hash = map.hash(&key_value);
1468
1469
                            assert_eq!(
1470
                                map.insert_entry_and(
1471
                                    NoisyDropper::new(Arc::clone(this_key_parent), key_value),
1472
                                    hash,
1473
                                    NoisyDropper::new(Arc::clone(this_value_parent), key_value),
1474
                                    |_, _| ()
1475
                                ),
1476
                                None
1477
                            );
1478
                        }
1479
                    })
1480
                })
1481
                .collect();
1482
1483
            for result in handles.into_iter().map(JoinHandle::join) {
1484
                assert!(result.is_ok());
1485
            }
1486
1487
            assert!(!map.is_empty());
1488
            assert_eq!(map.len(), NUM_VALUES);
1489
1490
            run_deferred();
1491
1492
            for this_key_parent in key_parents.iter() {
1493
                assert!(!this_key_parent.was_dropped());
1494
            }
1495
1496
            for this_value_parent in value_parents.iter() {
1497
                assert!(!this_value_parent.was_dropped());
1498
            }
1499
1500
            for i in (0..NUM_VALUES).map(|i| i as i32) {
1501
                assert_eq!(
1502
                    map.get_key_value_and(
1503
                        map.hash(&i),
1504
                        |k| k == &i,
1505
                        |k, v| {
1506
                            assert_eq!(**k, i);
1507
                            assert_eq!(*v, i);
1508
                        }
1509
                    ),
1510
                    Some(())
1511
                );
1512
            }
1513
1514
            #[allow(clippy::needless_collect)]
1515
            let handles: Vec<_> = (0..NUM_THREADS)
1516
                .map(|i| {
1517
                    let map = Arc::clone(&map);
1518
                    let barrier = Arc::clone(&barrier);
1519
1520
                    spawn(move || {
1521
                        barrier.wait();
1522
1523
                        for j in 0..NUM_VALUES_PER_THREAD {
1524
                            let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32;
1525
1526
                            assert_eq!(
1527
                                map.remove_entry_if_and(
1528
                                    map.hash(&key_value),
1529
                                    |k| k == &key_value,
1530
                                    |_, _| true,
1531
                                    |k, v| {
1532
                                        assert_eq!(**k, key_value);
1533
                                        assert_eq!(*v, key_value);
1534
                                    }
1535
                                ),
1536
                                Some(())
1537
                            );
1538
                        }
1539
                    })
1540
                })
1541
                .collect();
1542
1543
            for result in handles.into_iter().map(JoinHandle::join) {
1544
                assert!(result.is_ok());
1545
            }
1546
1547
            assert!(map.is_empty());
1548
            assert_eq!(map.len(), 0);
1549
1550
            run_deferred();
1551
1552
            let live_key_count =
1553
                NUM_VALUES - key_parents.iter().filter(|k| k.was_dropped()).count();
1554
            let bucket_array_len = map.capacity() * 2;
1555
            assert_eq!(bucket_array_len, map.num_segments() * 128 * 2);
1556
            assert!(live_key_count <= bucket_array_len / 10);
1557
1558
            for this_value_parent in value_parents.iter() {
1559
                assert!(this_value_parent.was_dropped());
1560
            }
1561
1562
            for i in (0..NUM_VALUES).map(|i| i as i32) {
1563
                assert_eq!(
1564
                    map.get_key_value_and(map.hash(&i), |k| k == &i, |_, _| ()),
1565
                    None
1566
                );
1567
            }
1568
        } // The map should be dropped here.
1569
1570
        run_deferred();
1571
1572
        for this_key_parent in key_parents.iter() {
1573
            assert!(this_key_parent.was_dropped());
1574
        }
1575
1576
        for this_value_parent in value_parents.iter() {
1577
            assert!(this_value_parent.was_dropped());
1578
        }
1579
    }
1580
1581
    #[test]
    fn drop_map_after_concurrent_updates() {
        // Insert entries from many threads, concurrently remove a quarter of
        // them, then drop the map and verify that every key and value is
        // eventually dropped.
        const NUM_THREADS: usize = 64;
        const NUM_VALUES_PER_THREAD: usize = 512;
        const NUM_VALUES: usize = NUM_THREADS * NUM_VALUES_PER_THREAD;

        // One DropNotifier per key and per value lets us observe drops.
        let key_parents: Arc<Vec<_>> = Arc::new(
            std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
                .take(NUM_VALUES)
                .collect(),
        );
        let value_parents: Arc<Vec<_>> = Arc::new(
            std::iter::repeat_with(|| Arc::new(DropNotifier::new()))
                .take(NUM_VALUES)
                .collect(),
        );

        {
            let map = Arc::new(HashMap::with_capacity(0));
            assert!(map.is_empty());
            assert_eq!(map.len(), 0);

            let barrier = Arc::new(Barrier::new(NUM_THREADS));

            // Phase 1: each thread inserts its own disjoint range of keys.
            let mut handles = Vec::with_capacity(NUM_THREADS);

            for thread_idx in 0..NUM_THREADS {
                let map = Arc::clone(&map);
                let barrier = Arc::clone(&barrier);
                let key_parents = Arc::clone(&key_parents);
                let value_parents = Arc::clone(&value_parents);

                handles.push(spawn(move || {
                    barrier.wait();

                    let start = thread_idx * NUM_VALUES_PER_THREAD;
                    let end = start + NUM_VALUES_PER_THREAD;
                    let these_key_parents = &key_parents[start..end];
                    let these_value_parents = &value_parents[start..end];

                    for (offset, (key_parent, value_parent)) in these_key_parents
                        .iter()
                        .zip(these_value_parents.iter())
                        .enumerate()
                    {
                        let key_value = (start + offset) as i32;
                        let hash = map.hash(&key_value);

                        // Keys are globally unique, so every insert is fresh.
                        assert_eq!(
                            map.insert_entry_and(
                                NoisyDropper::new(Arc::clone(key_parent), key_value),
                                hash,
                                NoisyDropper::new(Arc::clone(value_parent), key_value),
                                |_, _| ()
                            ),
                            None
                        );
                    }
                }));
            }

            for handle in handles {
                assert!(handle.join().is_ok());
            }

            assert!(!map.is_empty());
            assert_eq!(map.len(), NUM_VALUES);

            run_deferred();

            // Nothing may have been dropped yet; every entry is still live.
            for key_parent in key_parents.iter() {
                assert!(!key_parent.was_dropped());
            }

            for value_parent in value_parents.iter() {
                assert!(!value_parent.was_dropped());
            }

            // Every inserted entry must be observable with its own value.
            for i in (0..NUM_VALUES).map(|i| i as i32) {
                assert_eq!(
                    map.get_key_value_and(
                        map.hash(&i),
                        |k| k == &i,
                        |k, v| {
                            assert_eq!(**k, i);
                            assert_eq!(*v, i);
                        }
                    ),
                    Some(())
                );
            }

            // Phase 2: concurrently remove every fourth entry.
            let mut handles = Vec::with_capacity(NUM_THREADS);

            for thread_idx in 0..NUM_THREADS {
                let map = Arc::clone(&map);
                let barrier = Arc::clone(&barrier);

                handles.push(spawn(move || {
                    barrier.wait();

                    for offset in 0..NUM_VALUES_PER_THREAD {
                        let key_value =
                            (thread_idx * NUM_VALUES_PER_THREAD + offset) as i32;

                        if key_value % 4 == 0 {
                            assert_eq!(
                                map.remove_entry_if_and(
                                    map.hash(&key_value),
                                    |k| k == &key_value,
                                    |_, _| true,
                                    |k, v| {
                                        assert_eq!(**k, key_value);
                                        assert_eq!(*v, key_value);
                                    }
                                ),
                                Some(())
                            );
                        }
                    }
                }));
            }

            for handle in handles {
                assert!(handle.join().is_ok());
            }

            // Three quarters of the entries should remain.
            assert!(!map.is_empty());
            assert_eq!(map.len(), NUM_VALUES / 4 * 3);
        } // The map should be dropped here.

        run_deferred();

        // Dropping the map must release every remaining key and value.
        for key_parent in key_parents.iter() {
            assert!(key_parent.was_dropped());
        }

        for value_parent in value_parents.iter() {
            assert!(value_parent.was_dropped());
        }
    }
1723
1724
    #[test]
    fn remove_if() {
        // Insert 0..512 as identity entries, conditionally remove only the
        // even-valued ones, and verify the odd entries survive.
        const NUM_VALUES: i32 = 512;

        let is_even = |_: &i32, v: &i32| *v % 2 == 0;

        let map = HashMap::with_capacity(0);

        for i in 0..NUM_VALUES {
            assert_eq!(map.insert_entry_and(i, map.hash(&i), i, |_, v| *v), None);
        }

        // `remove_if` only removes when the condition holds, so it yields the
        // value for even keys and `None` for odd ones.
        for i in 0..NUM_VALUES {
            let expected = if is_even(&i, &i) { Some(i) } else { None };
            assert_eq!(map.remove_if(map.hash(&i), |&k| k == i, is_even), expected);
        }

        // Even keys are gone; odd keys remain with their original values.
        for i in 0..NUM_VALUES {
            let found = map.get(map.hash(&i), |&k| k == i);

            if i % 2 == 0 {
                assert_eq!(found, None);
            } else {
                assert_eq!(found, Some(i));
            }
        }

        run_deferred();
    }
1754
1755
    #[test]
    fn keys_in_single_segment() {
        // Exercise `keys` on a map confined to a single segment, both when
        // fully populated and after removing every even key.
        let map =
            HashMap::with_num_segments_capacity_and_hasher(1, 0, DefaultHashBuilder::default());

        assert!(map.is_empty());
        assert_eq!(map.len(), 0);

        const NUM_KEYS: usize = 200;

        // Populate with the identity mapping key -> key.
        for key in 0..NUM_KEYS {
            let hash = map.hash(&key);
            assert_eq!(map.insert_entry_and(key, hash, key, |_, v| *v), None);
        }

        assert!(!map.is_empty());
        assert_eq!(map.len(), NUM_KEYS);

        // Segment 0 holds every key; sorted, they are exactly 0..NUM_KEYS.
        let mut keys = map.keys(0, |k| *k).unwrap();
        assert_eq!(keys.len(), NUM_KEYS);
        keys.sort_unstable();

        for (expected, actual) in keys.into_iter().enumerate() {
            assert_eq!(expected, actual);
        }

        // Remove the even keys.
        for key in (0..NUM_KEYS).step_by(2) {
            assert_eq!(map.remove(map.hash(&key), |&k| k == key), Some(key));
        }

        assert!(!map.is_empty());
        assert_eq!(map.len(), NUM_KEYS / 2);

        // The survivors are the odd numbers: the i-th sorted key is 2*i + 1,
        // so integer division by two recovers its index.
        let mut keys = map.keys(0, |k| *k).unwrap();
        assert_eq!(keys.len(), NUM_KEYS / 2);
        keys.sort_unstable();

        for (i, key) in keys.into_iter().enumerate() {
            assert_eq!(i, key / 2);
        }

        run_deferred();
    }
1798
}