Coverage Report

Created: 2025-10-29 07:05

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/hashbrown-0.15.2/src/raw/mod.rs
Line
Count
Source
1
use crate::alloc::alloc::{handle_alloc_error, Layout};
2
use crate::control::{BitMaskIter, Group, Tag, TagSliceExt};
3
use crate::scopeguard::{guard, ScopeGuard};
4
use crate::util::{invalid_mut, likely, unlikely};
5
use crate::TryReserveError;
6
use core::array;
7
use core::iter::FusedIterator;
8
use core::marker::PhantomData;
9
use core::mem;
10
use core::ptr::NonNull;
11
use core::slice;
12
use core::{hint, ptr};
13
14
mod alloc;
15
pub(crate) use self::alloc::{do_alloc, Allocator, Global};
16
17
#[inline]
18
0
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
19
0
    to.offset_from(from) as usize
20
0
}
Unexecuted instantiation: hashbrown::raw::offset_from::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>
Unexecuted instantiation: hashbrown::raw::offset_from::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>
Unexecuted instantiation: hashbrown::raw::offset_from::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>
Unexecuted instantiation: hashbrown::raw::offset_from::<usize>
Unexecuted instantiation: hashbrown::raw::offset_from::<usize>
Unexecuted instantiation: hashbrown::raw::offset_from::<_>
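The helper above turns a pointer difference back into a bucket index measured in units of T. A minimal standalone sketch of the same calculation, using a plain slice in place of the table's data pointers (nothing here is hashbrown API):

fn main() {
    let data = [10u32, 20, 30, 40];
    // `end` plays the role of the table's data end: one past the last element.
    let end = unsafe { data.as_ptr().add(data.len()) };
    let third = unsafe { data.as_ptr().add(2) };
    // offset_from measures the distance in units of T, not in bytes.
    let distance = unsafe { end.offset_from(third) } as usize;
    assert_eq!(distance, 2);
}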
21
22
/// Whether memory allocation errors should return an error or abort.
23
#[derive(Copy, Clone)]
24
enum Fallibility {
25
    Fallible,
26
    Infallible,
27
}
28
29
impl Fallibility {
30
    /// Error to return on capacity overflow.
31
    #[cfg_attr(feature = "inline-more", inline)]
32
0
    fn capacity_overflow(self) -> TryReserveError {
33
0
        match self {
34
0
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
35
0
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
36
        }
37
0
    }
Unexecuted instantiation: <hashbrown::raw::Fallibility>::capacity_overflow
Unexecuted instantiation: <hashbrown::raw::Fallibility>::capacity_overflow
Unexecuted instantiation: <hashbrown::raw::Fallibility>::capacity_overflow
Unexecuted instantiation: <hashbrown::raw::Fallibility>::capacity_overflow
38
39
    /// Error to return on allocation error.
40
    #[cfg_attr(feature = "inline-more", inline)]
41
0
    fn alloc_err(self, layout: Layout) -> TryReserveError {
42
0
        match self {
43
0
            Fallibility::Fallible => TryReserveError::AllocError { layout },
44
0
            Fallibility::Infallible => handle_alloc_error(layout),
45
        }
46
0
    }
Unexecuted instantiation: <hashbrown::raw::Fallibility>::alloc_err
Unexecuted instantiation: <hashbrown::raw::Fallibility>::alloc_err
Unexecuted instantiation: <hashbrown::raw::Fallibility>::alloc_err
47
}
48
49
trait SizedTypeProperties: Sized {
50
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
51
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
52
}
53
54
impl<T> SizedTypeProperties for T {}
55
56
/// Primary hash function, used to select the initial bucket to probe from.
57
#[inline]
58
#[allow(clippy::cast_possible_truncation)]
59
0
fn h1(hash: u64) -> usize {
60
    // On 32-bit platforms we simply ignore the higher hash bits.
61
0
    hash as usize
62
0
}
Unexecuted instantiation: hashbrown::raw::h1
Unexecuted instantiation: hashbrown::raw::h1
Unexecuted instantiation: hashbrown::raw::h1
Unexecuted instantiation: hashbrown::raw::h1
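Since the bucket count is always a power of two, masking the h1 value with `bucket_mask` is the same as taking it modulo the bucket count; this is the property the later comments refer to as `h1(hash) & bucket_mask == hash as usize % buckets`. A small self-contained check, with an arbitrary hash value chosen for illustration:

fn h1(hash: u64) -> usize {
    hash as usize
}

fn main() {
    let buckets = 16usize;          // bucket counts are always powers of two
    let bucket_mask = buckets - 1;
    let hash = 0xDEAD_BEEF_u64;     // arbitrary example hash
    assert_eq!(h1(hash) & bucket_mask, h1(hash) % buckets);
}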
63
64
/// Probe sequence based on triangular numbers, which is guaranteed (since our
65
/// table size is a power of two) to visit every group of elements exactly once.
66
///
67
/// A triangular probe has us jump by 1 more group every time. So first we
68
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
69
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
70
///
71
/// Proof that the probe will visit every group in the table:
72
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
73
#[derive(Clone)]
74
struct ProbeSeq {
75
    pos: usize,
76
    stride: usize,
77
}
78
79
impl ProbeSeq {
80
    #[inline]
81
0
    fn move_next(&mut self, bucket_mask: usize) {
82
        // We should have found an empty bucket by now and ended the probe.
83
0
        debug_assert!(
84
0
            self.stride <= bucket_mask,
85
0
            "Went past end of probe sequence"
86
        );
87
88
0
        self.stride += Group::WIDTH;
89
0
        self.pos += self.stride;
90
0
        self.pos &= bucket_mask;
91
0
    }
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
92
}
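The claim in the comment above, that a triangular probe visits every slot of a power-of-two table exactly once, can be checked directly. A sketch using a group width of 1 so the whole table is walked slot by slot (the real code strides by `Group::WIDTH`):

fn main() {
    const GROUP_WIDTH: usize = 1;   // stand-in for Group::WIDTH, to keep the demo tiny
    let buckets = 16usize;
    let bucket_mask = buckets - 1;

    let mut pos = 7usize;           // arbitrary start, i.e. h1(hash) & bucket_mask
    let mut stride = 0usize;
    let mut seen = vec![false; buckets];

    for _ in 0..buckets {
        assert!(!seen[pos], "a position was visited twice");
        seen[pos] = true;
        // Same update as ProbeSeq::move_next: the stride grows each step.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
    assert!(seen.iter().all(|&v| v), "every position was visited exactly once");
}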
93
94
/// Returns the number of buckets needed to hold the given number of items,
95
/// taking the maximum load factor into account.
96
///
97
/// Returns `None` if an overflow occurs.
98
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
99
#[cfg_attr(target_os = "emscripten", inline(never))]
100
#[cfg_attr(not(target_os = "emscripten"), inline)]
101
0
fn capacity_to_buckets(cap: usize) -> Option<usize> {
102
0
    debug_assert_ne!(cap, 0);
103
104
    // For small tables we require at least 1 empty bucket so that lookups are
105
    // guaranteed to terminate if an element doesn't exist in the table.
106
0
    if cap < 8 {
107
        // We don't bother with a table size of 2 buckets since that can only
108
        // hold a single element. Instead we skip directly to a 4 bucket table
109
        // which can hold 3 elements.
110
0
        return Some(if cap < 4 { 4 } else { 8 });
111
0
    }
112
113
    // Otherwise require 1/8 buckets to be empty (87.5% load)
114
    //
115
    // Be careful when modifying this, calculate_layout relies on the
116
    // overflow check here.
117
0
    let adjusted_cap = cap.checked_mul(8)? / 7;
118
119
    // Any overflows will have been caught by the checked_mul. Also, any
120
    // rounding errors from the division above will be cleaned up by
121
    // next_power_of_two (which can't overflow because of the previous division).
122
0
    Some(adjusted_cap.next_power_of_two())
123
0
}
Unexecuted instantiation: hashbrown::raw::capacity_to_buckets
Unexecuted instantiation: hashbrown::raw::capacity_to_buckets
Unexecuted instantiation: hashbrown::raw::capacity_to_buckets
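A standalone re-derivation of the rule described above, showing how a requested capacity maps to a bucket count: small tables jump straight to 4 or 8 buckets, while larger ones keep 1/8 of the slots empty and round up to a power of two. The numbers in the assertions are worked examples, not values taken from this report:

fn capacity_to_buckets(cap: usize) -> Option<usize> {
    assert_ne!(cap, 0);
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }
    let adjusted_cap = cap.checked_mul(8)? / 7;
    Some(adjusted_cap.next_power_of_two())
}

fn main() {
    assert_eq!(capacity_to_buckets(3), Some(4));   // up to 3 items fit in 4 buckets
    assert_eq!(capacity_to_buckets(7), Some(8));
    assert_eq!(capacity_to_buckets(28), Some(32)); // 28 == 32 * 7/8, still fits
    assert_eq!(capacity_to_buckets(29), Some(64)); // one more item forces a doubling
}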
124
125
/// Returns the maximum effective capacity for the given bucket mask, taking
126
/// the maximum load factor into account.
127
#[inline]
128
0
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
129
0
    if bucket_mask < 8 {
130
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
131
        // Keep in mind that the bucket mask is one less than the bucket count.
132
0
        bucket_mask
133
    } else {
134
        // For larger tables we reserve 12.5% of the slots as empty.
135
0
        ((bucket_mask + 1) / 8) * 7
136
    }
137
0
}
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
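Together with `capacity_to_buckets`, this function closes the loop: whatever bucket count is chosen for a requested capacity must give back at least that much usable capacity under the 7/8 load factor. A brute-force sketch over small capacities, with the bucket-count rule inlined from the function above:

fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        bucket_mask                   // tiny tables always keep one slot empty
    } else {
        ((bucket_mask + 1) / 8) * 7   // larger tables keep 12.5% of slots empty
    }
}

fn main() {
    for cap in 1..=1_000usize {
        // Same rule as capacity_to_buckets above.
        let buckets = if cap < 4 {
            4
        } else if cap < 8 {
            8
        } else {
            (cap * 8 / 7).next_power_of_two()
        };
        assert!(bucket_mask_to_capacity(buckets - 1) >= cap);
    }
}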
138
139
/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T`
140
/// while keeping the rest of `calculate_layout_for` independent of `T`
141
#[derive(Copy, Clone)]
142
struct TableLayout {
143
    size: usize,
144
    ctrl_align: usize,
145
}
146
147
impl TableLayout {
148
    #[inline]
149
0
    const fn new<T>() -> Self {
150
0
        let layout = Layout::new::<T>();
151
        Self {
152
0
            size: layout.size(),
153
0
            ctrl_align: if layout.align() > Group::WIDTH {
154
0
                layout.align()
155
            } else {
156
0
                Group::WIDTH
157
            },
158
        }
159
0
    }
160
161
    #[inline]
162
0
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
163
0
        debug_assert!(buckets.is_power_of_two());
164
165
0
        let TableLayout { size, ctrl_align } = self;
166
        // Manual layout calculation since Layout methods are not yet stable.
167
0
        let ctrl_offset =
168
0
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
169
0
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
170
171
        // We need an additional check to ensure that the allocation doesn't
172
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
173
0
        if len > isize::MAX as usize - (ctrl_align - 1) {
174
0
            return None;
175
0
        }
176
177
0
        Some((
178
0
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
179
0
            ctrl_offset,
180
0
        ))
181
0
    }
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
182
}
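A worked example of the arithmetic in `calculate_layout_for`, using a hypothetical `T` with size 8 and alignment 8, a group width of 16, and 32 buckets: the data part comes first, the control bytes start at the next `ctrl_align` boundary, and one extra group of trailing control bytes is appended.

fn main() {
    let size = 8usize;                        // size_of::<T>() for the example T
    let align = 8usize;                       // align_of::<T>() for the example T
    let group_width = 16usize;                // stand-in for Group::WIDTH
    let ctrl_align = align.max(group_width);  // as computed by TableLayout::new
    let buckets = 32usize;

    // Round the end of the data part up to the control-byte alignment.
    let ctrl_offset = (size * buckets + ctrl_align - 1) & !(ctrl_align - 1);
    let len = ctrl_offset + buckets + group_width;

    assert_eq!(ctrl_offset, 256); // 32 elements * 8 bytes, already aligned to 16
    assert_eq!(len, 304);         // plus 32 control bytes and 16 trailing group bytes
}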
183
184
/// A reference to an empty bucket into which an element can be inserted.
185
pub struct InsertSlot {
186
    index: usize,
187
}
188
189
/// A reference to a hash table bucket containing a `T`.
190
///
191
/// This is usually just a pointer to the element itself. However if the element
192
/// is a ZST, then we instead track the index of the element in the table so
193
/// that `erase` works properly.
194
pub struct Bucket<T> {
195
    // Actually this is a pointer to the slot after the element, rather than to the
196
    // element itself; this is needed to maintain pointer arithmetic invariants, since
197
    // keeping a direct pointer to the element would introduce difficulty.
198
    // Using `NonNull` for variance and niche layout
199
    ptr: NonNull<T>,
200
}
201
202
// This Send impl is needed for rayon support. This is safe since Bucket is
203
// never exposed in a public API.
204
unsafe impl<T> Send for Bucket<T> {}
205
206
impl<T> Clone for Bucket<T> {
207
    #[inline]
208
0
    fn clone(&self) -> Self {
209
0
        Self { ptr: self.ptr }
210
0
    }
211
}
212
213
impl<T> Bucket<T> {
214
    /// Creates a [`Bucket`] that contains a pointer to the data.
215
    /// The pointer calculation is performed by calculating the
216
    /// offset from the given `base` pointer (convenience for
217
    /// `base.as_ptr().sub(index)`).
218
    ///
219
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
220
    /// offset of `3 * size_of::<T>()` bytes.
221
    ///
222
    /// If the `T` is a ZST, then we instead track the index of the element
223
    /// in the table so that `erase` works properly (return
224
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
225
    ///
226
    /// # Safety
227
    ///
228
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
229
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
230
    /// rules of [`NonNull::new_unchecked`] function.
231
    ///
232
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
233
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
234
    /// logic of the work of this crate, the following rules are necessary and
235
    /// sufficient:
236
    ///
237
    /// * the `base` pointer must not be `dangling` and must point to the
238
    ///   end of the first `value element` from the `data part` of the table, i.e.
239
    ///   must be the pointer returned by [`RawTable::data_end`] or by
240
    ///   [`RawTableInner::data_end<T>`];
241
    ///
242
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
243
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
244
    ///   must be no greater than the number returned by the function
245
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
246
    ///
247
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
248
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
249
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
250
    /// must be no greater than the number returned by the function
251
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
252
    ///
253
    /// [`Bucket`]: crate::raw::Bucket
254
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
255
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
256
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
257
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
258
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
259
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
260
    #[inline]
261
0
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
262
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
263
        // the data part of the table (we start counting from "0", so that
264
        // in the expression T[last], the "last" index actually one less than the
265
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
266
        //
267
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
268
        //                   points here in the data part of the table
269
        //                   (to the start of T1)
270
        //                        |
271
        //                        |        `base: NonNull<T>` must point here
272
        //                        |         (to the end of T0 or to the start of C0)
273
        //                        v         v
274
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
275
        //                           ^
276
        //                           `from_base_index(base, 1)` returns a pointer
277
        //                           that points here in the data part of the table
278
        //                           (to the end of T1)
279
        //
280
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
281
        // or metadata for data.
282
0
        let ptr = if T::IS_ZERO_SIZED {
283
            // won't overflow because index must be less than length (bucket_mask)
284
            // and bucket_mask is guaranteed to be less than `isize::MAX`
285
            // (see TableLayout::calculate_layout_for method)
286
0
            invalid_mut(index + 1)
287
        } else {
288
0
            base.as_ptr().sub(index)
289
        };
290
0
        Self {
291
0
            ptr: NonNull::new_unchecked(ptr),
292
0
        }
293
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::from_base_index
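The diagram above describes buckets that count backwards from the end of the data part. A standalone sketch of that addressing with a plain array standing in for the data part: `base` points one past `T0`, and the element at bucket index `i` lives at `base.sub(i + 1)` (the extra `sub(1)` is what `Bucket::as_ptr` adds on top of `from_base_index`):

fn main() {
    // Ascending addresses: T3, T2, T1, T0; the control bytes would follow.
    let data = [3u32, 2, 1, 0];
    unsafe {
        // `base` plays the role of RawTable::data_end(): one past T0.
        let base = data.as_ptr().add(data.len());
        for index in 0..data.len() {
            let elem = base.sub(index + 1);
            // The array was laid out so that bucket `index` holds the value `index`.
            assert_eq!(*elem, index as u32);
        }
    }
}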
294
295
    /// Calculates the index of a [`Bucket`] as the distance between two pointers
296
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
297
    /// The returned value is in units of T: the distance in bytes divided by
298
    /// [`core::mem::size_of::<T>()`].
299
    ///
300
    /// If the `T` is a ZST, then we return the index of the element in
301
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
302
    ///
303
    /// This function is the inverse of [`from_base_index`].
304
    ///
305
    /// # Safety
306
    ///
307
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
308
    /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
309
    ///
310
    /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
311
    /// method, as well as for the correct logic of the work of this crate, the
312
    /// following rules are necessary and sufficient:
313
    ///
314
    /// * `base` contained pointer must not be `dangling` and must point to the
315
    ///   end of the first `element` from the `data part` of the table, i.e.
316
    ///   must be a pointer returned by [`RawTable::data_end`] or by
317
    ///   [`RawTableInner::data_end<T>`];
318
    ///
319
    /// * `self` also must not contain a dangling pointer;
320
    ///
321
    /// * both `self` and `base` must be created from the same [`RawTable`]
322
    ///   (or [`RawTableInner`]).
323
    ///
324
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
325
    ///
326
    /// [`Bucket`]: crate::raw::Bucket
327
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
328
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
329
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
330
    /// [`RawTable`]: crate::raw::RawTable
331
    /// [`RawTableInner`]: RawTableInner
332
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
333
    #[inline]
334
0
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
335
        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
336
        // `element` in the data part of the table (we start counting from "0", so
337
        // that in the expression T[last], the "last" index actually is one less than the
338
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
339
        // For example for 5th element in table calculation is performed like this:
340
        //
341
        //                        mem::size_of::<T>()
342
        //                          |
343
        //                          |         `self = from_base_index(base, 5)` that returns pointer
344
        //                          |         that points here in the data part of the table
345
        //                          |         (to the end of T5)
346
        //                          |           |                    `base: NonNull<T>` must point here
347
        //                          v           |                    (to the end of T0 or to the start of C0)
348
        //                        /???\         v                      v
349
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
350
        //                                      \__________  __________/
351
        //                                                 \/
352
        //                                     `bucket.to_base_index(base)` = 5
353
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
354
        //
355
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
356
0
        if T::IS_ZERO_SIZED {
357
            // this can not be UB
358
0
            self.ptr.as_ptr() as usize - 1
359
        } else {
360
0
            offset_from(base.as_ptr(), self.ptr.as_ptr())
361
        }
362
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::to_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::to_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::to_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::to_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::to_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::to_base_index
363
364
    /// Acquires the underlying raw pointer `*mut T` to `data`.
365
    ///
366
    /// # Note
367
    ///
368
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
369
    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
370
    /// for properly dropping the data we also need to clear `data` control bytes. If we
371
    /// drop the data but do not clear the `data` control byte, it leads to a double drop when
372
    /// [`RawTable`] goes out of scope.
373
    ///
374
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
375
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
376
    /// will not re-evaluate where the new value should go, meaning the value may become
377
    /// "lost" if their location does not reflect their state.
378
    ///
379
    /// [`RawTable`]: crate::raw::RawTable
380
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
381
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
382
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
383
    #[inline]
384
0
    pub fn as_ptr(&self) -> *mut T {
385
0
        if T::IS_ZERO_SIZED {
386
            // Just return an arbitrary ZST pointer which is properly aligned
387
            // invalid pointer is good enough for ZST
388
0
            invalid_mut(mem::align_of::<T>())
389
        } else {
390
0
            unsafe { self.ptr.as_ptr().sub(1) }
391
        }
392
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ptr
393
394
    /// Acquires the underlying non-null pointer `*mut T` to `data`.
395
    #[inline]
396
0
    fn as_non_null(&self) -> NonNull<T> {
397
        // SAFETY: `self.ptr` is already a `NonNull`
398
0
        unsafe { NonNull::new_unchecked(self.as_ptr()) }
399
0
    }
400
401
    /// Creates a new [`Bucket`] that is offset from `self` by the given
402
    /// `offset`. The pointer calculation is performed by calculating the
403
    /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
404
    /// This function is used for iterators.
405
    ///
406
    /// `offset` is in units of `T`; e.g., a `offset` of 3 represents a pointer
407
    /// offset of `3 * size_of::<T>()` bytes.
408
    ///
409
    /// # Safety
410
    ///
411
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
412
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
413
    /// rules of [`NonNull::new_unchecked`] function.
414
    ///
415
    /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
416
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
417
    /// logic of the work of this crate, the following rules are necessary and
418
    /// sufficient:
419
    ///
420
    /// * `self` contained pointer must not be `dangling`;
421
    ///
422
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
423
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
424
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
425
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
426
    ///
427
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
428
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
429
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
430
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
431
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
432
    ///
433
    /// [`Bucket`]: crate::raw::Bucket
434
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
435
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
436
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
437
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
438
    #[inline]
439
0
    unsafe fn next_n(&self, offset: usize) -> Self {
440
0
        let ptr = if T::IS_ZERO_SIZED {
441
            // invalid pointer is good enough for ZST
442
0
            invalid_mut(self.ptr.as_ptr() as usize + offset)
443
        } else {
444
0
            self.ptr.as_ptr().sub(offset)
445
        };
446
0
        Self {
447
0
            ptr: NonNull::new_unchecked(ptr),
448
0
        }
449
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::next_n
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::next_n
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::next_n
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::next_n
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::next_n
450
451
    /// Executes the destructor (if any) of the pointed-to `data`.
452
    ///
453
    /// # Safety
454
    ///
455
    /// See [`ptr::drop_in_place`] for safety concerns.
456
    ///
457
    /// You should use [`RawTable::erase`] instead of this function,
458
    /// or be careful with calling this function directly, because for
459
    /// properly dropping the data we also need to clear the `data` control bytes.
460
    /// If we drop the data but do not erase the `data` control byte, it leads to a
461
    /// double drop when [`RawTable`] goes out of scope.
462
    ///
463
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
464
    /// [`RawTable`]: crate::raw::RawTable
465
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
466
    #[cfg_attr(feature = "inline-more", inline)]
467
0
    pub(crate) unsafe fn drop(&self) {
468
0
        self.as_ptr().drop_in_place();
469
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::drop
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::drop
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::drop
470
471
    /// Reads the `value` from `self` without moving it. This leaves the
472
    /// memory in `self` unchanged.
473
    ///
474
    /// # Safety
475
    ///
476
    /// See [`ptr::read`] for safety concerns.
477
    ///
478
    /// You should use [`RawTable::remove`] instead of this function,
479
    /// or be careful with calling this function directly, because the compiler
480
    /// calls its destructor when the read `value` goes out of scope. It
481
    /// can cause double dropping when [`RawTable`] goes out of scope,
482
    /// because the `data` control byte was not erased.
483
    ///
484
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
485
    /// [`RawTable`]: crate::raw::RawTable
486
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
487
    #[inline]
488
0
    pub(crate) unsafe fn read(&self) -> T {
489
0
        self.as_ptr().read()
490
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::read
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::read
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::read
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::read
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::read
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::read
491
492
    /// Overwrites a memory location with the given `value` without reading
493
    /// or dropping the old value (like [`ptr::write`] function).
494
    ///
495
    /// # Safety
496
    ///
497
    /// See [`ptr::write`] for safety concerns.
498
    ///
499
    /// # Note
500
    ///
501
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
502
    /// those for the old `T` value, as the map will not re-evaluate where the new
503
    /// value should go, meaning the value may become "lost" if their location
504
    /// does not reflect their state.
505
    ///
506
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
507
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
508
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
509
    #[inline]
510
0
    pub(crate) unsafe fn write(&self, val: T) {
511
0
        self.as_ptr().write(val);
512
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::write
513
514
    /// Returns a shared immutable reference to the `value`.
515
    ///
516
    /// # Safety
517
    ///
518
    /// See [`NonNull::as_ref`] for safety concerns.
519
    ///
520
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
521
    #[inline]
522
0
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
523
0
        &*self.as_ptr()
524
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ref
525
526
    /// Returns a unique mutable reference to the `value`.
527
    ///
528
    /// # Safety
529
    ///
530
    /// See [`NonNull::as_mut`] for safety concerns.
531
    ///
532
    /// # Note
533
    ///
534
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
535
    /// those for the old `T` value, as the map will not re-evaluate where the new
536
    /// value should go, meaning the value may become "lost" if their location
537
    /// does not reflect their state.
538
    ///
539
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
540
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
541
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
542
    #[inline]
543
0
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
544
0
        &mut *self.as_ptr()
545
0
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::as_mut
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::as_mut
Unexecuted instantiation: <hashbrown::raw::Bucket<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::as_mut
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_mut
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_mut
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_mut
546
}
547
548
/// A raw hash table with an unsafe API.
549
pub struct RawTable<T, A: Allocator = Global> {
550
    table: RawTableInner,
551
    alloc: A,
552
    // Tell dropck that we own instances of T.
553
    marker: PhantomData<T>,
554
}
555
556
/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
557
/// of how many different key-value types are used.
558
struct RawTableInner {
559
    // Mask to get an index from a hash value. The value is one less than the
560
    // number of buckets in the table.
561
    bucket_mask: usize,
562
563
    // [Padding], T_n, ..., T1, T0, C0, C1, ...
564
    //                              ^ points here
565
    ctrl: NonNull<u8>,
566
567
    // Number of elements that can be inserted before we need to grow the table
568
    growth_left: usize,
569
570
    // Number of elements in the table, only really used by len()
571
    items: usize,
572
}
573
574
impl<T> RawTable<T, Global> {
575
    /// Creates a new empty hash table without allocating any memory.
576
    ///
577
    /// In effect this returns a table with exactly 1 bucket. However we can
578
    /// leave the data pointer dangling since that bucket is never written to
579
    /// due to our load factor forcing us to always have at least 1 free bucket.
580
    #[inline]
581
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
582
0
    pub const fn new() -> Self {
583
0
        Self {
584
0
            table: RawTableInner::NEW,
585
0
            alloc: Global,
586
0
            marker: PhantomData,
587
0
        }
588
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::new
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::new
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::new
589
590
    /// Allocates a new hash table with at least enough capacity for inserting
591
    /// the given number of elements without reallocating.
592
0
    pub fn with_capacity(capacity: usize) -> Self {
593
0
        Self::with_capacity_in(capacity, Global)
594
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::with_capacity
595
}
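The same capacity guarantee is observable from hashbrown's public map type (assuming the `hashbrown` crate as a dependency; `RawTable` itself is internal): a map created with `with_capacity(n)` absorbs `n` inserts without growing.

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::with_capacity(28);
    let before = map.capacity();
    assert!(before >= 28);
    for i in 0..28 {
        map.insert(i, i * 2);
    }
    // No reallocation was needed for the reserved 28 elements.
    assert_eq!(map.capacity(), before);
}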
596
597
impl<T, A: Allocator> RawTable<T, A> {
598
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
599
600
    /// Creates a new empty hash table without allocating any memory, using the
601
    /// given allocator.
602
    ///
603
    /// In effect this returns a table with exactly 1 bucket. However we can
604
    /// leave the data pointer dangling since that bucket is never written to
605
    /// due to our load factor forcing us to always have at least 1 free bucket.
606
    #[inline]
607
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
608
0
    pub const fn new_in(alloc: A) -> Self {
609
0
        Self {
610
0
            table: RawTableInner::NEW,
611
0
            alloc,
612
0
            marker: PhantomData,
613
0
        }
614
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::new_in
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::new_in
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::new_in
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::new_in
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::new_in
615
616
    /// Allocates a new hash table with the given number of buckets.
617
    ///
618
    /// The control bytes are left uninitialized.
619
    #[cfg_attr(feature = "inline-more", inline)]
620
0
    unsafe fn new_uninitialized(
621
0
        alloc: A,
622
0
        buckets: usize,
623
0
        fallibility: Fallibility,
624
0
    ) -> Result<Self, TryReserveError> {
625
0
        debug_assert!(buckets.is_power_of_two());
626
627
        Ok(Self {
628
0
            table: RawTableInner::new_uninitialized(
629
0
                &alloc,
630
                Self::TABLE_LAYOUT,
631
0
                buckets,
632
0
                fallibility,
633
0
            )?,
634
0
            alloc,
635
0
            marker: PhantomData,
636
        })
637
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::new_uninitialized
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::new_uninitialized
638
639
    /// Allocates a new hash table using the given allocator, with at least enough capacity for
640
    /// inserting the given number of elements without reallocating.
641
0
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
642
0
        Self {
643
0
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
644
0
            alloc,
645
0
            marker: PhantomData,
646
0
        }
647
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity_in
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity_in
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::with_capacity_in
648
649
    /// Returns a reference to the underlying allocator.
650
    #[inline]
651
0
    pub fn allocator(&self) -> &A {
652
0
        &self.alloc
653
0
    }
654
655
    /// Returns a pointer to one past the last `data` element in the table, as viewed from
656
    /// the start point of the allocation.
657
    ///
658
    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
659
    /// otherwise using it may result in [`undefined behavior`].
660
    ///
661
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
662
    #[inline]
663
0
    pub fn data_end(&self) -> NonNull<T> {
664
        //                        `self.table.ctrl.cast()` returns pointer that
665
        //                        points here (to the end of `T0`)
666
        //                          ∨
667
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
668
        //                           \________  ________/
669
        //                                    \/
670
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
671
        //
672
        // where: T0...T_n  - our stored data;
673
        //        CT0...CT_n - control bytes or metadata for `data`.
674
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
675
        //                        with loading `Group` bytes from the heap works properly, even if the result
676
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
677
        //                        `RawTableInner::set_ctrl` function.
678
        //
679
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
680
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
681
0
        self.table.ctrl.cast()
682
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::data_end
683
684
    /// Returns a pointer to the start of the data table.
685
    #[inline]
686
    #[cfg(feature = "nightly")]
687
    pub unsafe fn data_start(&self) -> NonNull<T> {
688
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
689
    }
690
691
    /// Returns the total amount of memory allocated internally by the hash
692
    /// table, in bytes.
693
    ///
694
    /// The returned number is informational only. It is intended to be
695
    /// primarily used for memory profiling.
696
    #[inline]
697
0
    pub fn allocation_size(&self) -> usize {
698
        // SAFETY: We use the same `table_layout` that was used to allocate
699
        // this table.
700
0
        unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) }
701
0
    }
702
703
    /// Returns the index of a bucket from a `Bucket`.
704
    #[inline]
705
0
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
706
0
        bucket.to_base_index(self.data_end())
707
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::bucket_index
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::bucket_index
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::bucket_index
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::bucket_index
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::bucket_index
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::bucket_index
708
709
    /// Returns a pointer to an element in the table.
710
    ///
711
    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
712
    /// otherwise using it may result in [`undefined behavior`].
713
    ///
714
    /// # Safety
715
    ///
716
    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
717
    /// following safety rules:
718
    ///
719
    /// * The table must already be allocated;
720
    ///
721
    /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
722
    ///   function, i.e. `(index + 1) <= self.buckets()`.
723
    ///
724
    /// It is safe to call this function with index of zero (`index == 0`) on a table that has
725
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
726
    ///
727
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
728
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
729
    /// `(index + 1) <= self.buckets()`.
730
    ///
731
    /// [`RawTable::buckets`]: RawTable::buckets
732
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
733
    #[inline]
734
0
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
735
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
736
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
737
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
738
        //
739
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
740
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
741
        //                  |
742
        //                  |               `base = self.data_end()` points here
743
        //                  |               (to the start of CT0 or to the end of T0)
744
        //                  v                 v
745
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
746
        //                     ^                                              \__________  __________/
747
        //        `table.bucket(3)` returns a pointer that points                        \/
748
        //         here in the `data` part of the `RawTable` (to              additional control bytes
749
        //         the end of T3)                                              `m = Group::WIDTH - 1`
750
        //
751
        // where: T0...T_n  - our stored data;
752
        //        CT0...CT_n - control bytes or metadata for `data`;
753
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
754
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
755
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
756
        //
757
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
758
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
759
0
        debug_assert_ne!(self.table.bucket_mask, 0);
760
0
        debug_assert!(index < self.buckets());
761
0
        Bucket::from_base_index(self.data_end(), index)
762
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::bucket
763
764
    /// Erases an element from the table without dropping it.
765
    #[cfg_attr(feature = "inline-more", inline)]
766
0
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
767
0
        let index = self.bucket_index(item);
768
0
        self.table.erase(index);
769
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::erase_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::erase_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::erase_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::erase_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::erase_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::erase_no_drop
770
771
    /// Erases an element from the table, dropping it in place.
772
    #[cfg_attr(feature = "inline-more", inline)]
773
    #[allow(clippy::needless_pass_by_value)]
774
0
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
775
        // Erase the element from the table first since drop might panic.
776
0
        self.erase_no_drop(&item);
777
0
        item.drop();
778
0
    }
779
780
    /// Removes an element from the table, returning it.
781
    ///
782
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
783
    #[cfg_attr(feature = "inline-more", inline)]
784
    #[allow(clippy::needless_pass_by_value)]
785
0
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
786
0
        self.erase_no_drop(&item);
787
0
        (
788
0
            item.read(),
789
0
            InsertSlot {
790
0
                index: self.bucket_index(&item),
791
0
            },
792
0
        )
793
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::remove
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::remove
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::remove
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::remove
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::remove
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::remove
794
795
    /// Finds and removes an element from the table, returning it.
796
    #[cfg_attr(feature = "inline-more", inline)]
797
0
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
798
        // Avoid `Option::map` because it bloats LLVM IR.
799
0
        match self.find(hash, eq) {
800
0
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
801
0
            None => None,
802
        }
803
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::remove_entry::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::remove_entry::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::remove_entry::<_>
804
805
    /// Marks all table buckets as empty without dropping their contents.
806
    #[cfg_attr(feature = "inline-more", inline)]
807
0
    pub fn clear_no_drop(&mut self) {
808
0
        self.table.clear_no_drop();
809
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::clear_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::clear_no_drop
810
811
    /// Removes all elements from the table without freeing the backing memory.
812
    #[cfg_attr(feature = "inline-more", inline)]
813
0
    pub fn clear(&mut self) {
814
0
        if self.is_empty() {
815
            // Special case empty table to avoid surprising O(capacity) time.
816
0
            return;
817
0
        }
818
        // Ensure that the table is reset even if one of the drops panic
819
0
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::clear::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::clear::{closure#0}
820
0
        unsafe {
821
0
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
822
0
            // even in case of panic during the dropping of the elements so
823
0
            // that there will be no double drop of the elements.
824
0
            self_.table.drop_elements::<T>();
825
0
        }
826
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::clear
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::clear
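`clear` wraps the drop loop in a scope guard so the table is reset to empty even if dropping an element panics. A minimal hand-rolled sketch of that drop-guard pattern (hashbrown uses its internal `scopeguard` module; the `Guard` type here is illustrative only):

struct Guard<F: FnMut()>(F);

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        // Runs on normal exit and during unwinding alike.
        (self.0)();
    }
}

fn main() {
    let mut cleared = false;
    {
        let _reset = Guard(|| cleared = true);
        // ... drop the elements here; a panic would still run the guard ...
    }
    assert!(cleared);
}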
827
828
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
829
    #[cfg_attr(feature = "inline-more", inline)]
830
0
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
831
        // Calculate the minimal number of elements that we need to reserve
832
        // space for.
833
0
        let min_size = usize::max(self.table.items, min_size);
834
0
        if min_size == 0 {
835
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
836
0
            unsafe {
837
0
                // SAFETY:
838
0
                // 1. We call the function only once;
839
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
840
0
                //    and [`TableLayout`] that were used to allocate this table.
841
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
842
0
                //    because we have replaced the inner table with a new one.
843
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
844
0
            }
845
0
            return;
846
0
        }
847
848
        // Calculate the number of buckets that we need for this number of
849
        // elements. If the calculation overflows then the requested bucket
850
        // count must be larger than what we have right now, and nothing needs to be
851
        // done.
852
0
        let min_buckets = match capacity_to_buckets(min_size) {
853
0
            Some(buckets) => buckets,
854
0
            None => return,
855
        };
856
857
        // If we have more buckets than we need, shrink the table.
858
0
        if min_buckets < self.buckets() {
859
            // Fast path if the table is empty
860
0
            if self.table.items == 0 {
861
0
                let new_inner =
862
0
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
863
0
                let mut old_inner = mem::replace(&mut self.table, new_inner);
864
0
                unsafe {
865
0
                    // SAFETY:
866
0
                    // 1. We call the function only once;
867
0
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
868
0
                    //    and [`TableLayout`] that were used to allocate this table.
869
0
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
870
0
                    //    because we have replaced the inner table with a new one.
871
0
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
872
0
                }
873
            } else {
874
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
875
                unsafe {
876
                    // SAFETY:
877
                    // 1. We know for sure that `min_size >= self.table.items`.
878
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
879
                    //    we will never expose RawTable::new_uninitialized in a public API.
880
0
                    if self
881
0
                        .resize(min_size, hasher, Fallibility::Infallible)
882
0
                        .is_err()
883
                    {
884
                        // SAFETY: The result of calling the `resize` function cannot be an error
885
                        // because `fallibility == Fallibility::Infallible`.
886
0
                        hint::unreachable_unchecked()
887
0
                    }
888
                }
889
            }
890
0
        }
891
0
    }
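The raw `shrink_to` above is what backs the map-level shrink; its observable contract is that capacity never drops below `max(len, min_size)`. A minimal sketch of that behaviour using `std::collections::HashMap`, which exposes the analogous public method (illustrative only, not the raw API itself):

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::with_capacity(128);
    map.extend((0..10).map(|i| (i, i * i)));

    // Requesting a shrink below the current length keeps room for
    // max(len, min_capacity) elements, so all 10 entries survive.
    map.shrink_to(4);
    assert_eq!(map.len(), 10);
    assert!(map.capacity() >= map.len());
}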
892
893
    /// Ensures that at least `additional` items can be inserted into the table
894
    /// without reallocation.
895
    #[cfg_attr(feature = "inline-more", inline)]
896
0
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
897
0
        if unlikely(additional > self.table.growth_left) {
898
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
899
            unsafe {
900
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
901
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
902
0
                if self
903
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
904
0
                    .is_err()
905
                {
906
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
907
0
                    hint::unreachable_unchecked()
908
0
                }
909
            }
910
0
        }
911
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::reserve::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::reserve::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve::<_>
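`reserve` only takes the cold `reserve_rehash` path when `additional` exceeds `growth_left`; the visible guarantee is that afterwards at least `len() + additional` items fit without reallocating. A small sketch with the analogous `std::collections::HashMap::reserve` (assumption: the public method mirrors this contract):

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<String, u64> = HashMap::new();
    map.insert("a".to_string(), 1);

    // After the call, the capacity covers the existing items plus `additional`.
    map.reserve(100);
    assert!(map.capacity() >= map.len() + 100);
}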
912
913
    /// Tries to ensure that at least `additional` items can be inserted into
914
    /// the table without reallocation.
915
    #[cfg_attr(feature = "inline-more", inline)]
916
0
    pub fn try_reserve(
917
0
        &mut self,
918
0
        additional: usize,
919
0
        hasher: impl Fn(&T) -> u64,
920
0
    ) -> Result<(), TryReserveError> {
921
0
        if additional > self.table.growth_left {
922
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
923
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
924
0
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
925
        } else {
926
0
            Ok(())
927
        }
928
0
    }
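`try_reserve` is the fallible twin of `reserve`: capacity overflow or allocation failure comes back as a `TryReserveError` instead of panicking or aborting. Sketch with the analogous std method:

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u64, u64> = HashMap::new();

    // A request this large overflows the capacity calculation and is
    // reported as an error rather than taking the process down.
    match map.try_reserve(usize::MAX) {
        Ok(()) => println!("reserved"),
        Err(e) => println!("reservation failed: {e}"),
    }
}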
929
930
    /// Out-of-line slow path for `reserve` and `try_reserve`.
931
    ///
932
    /// # Safety
933
    ///
934
    /// The [`RawTableInner`] must have properly initialized control bytes,
935
    /// otherwise calling this function results in [`undefined behavior`]
936
    ///
937
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
938
    #[cold]
939
    #[inline(never)]
940
0
    unsafe fn reserve_rehash(
941
0
        &mut self,
942
0
        additional: usize,
943
0
        hasher: impl Fn(&T) -> u64,
944
0
        fallibility: Fallibility,
945
0
    ) -> Result<(), TryReserveError> {
946
        unsafe {
947
            // SAFETY:
948
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
949
            //    [`TableLayout`] that were used to allocate this table.
950
            // 2. The `drop` function is the actual drop function of the elements stored in
951
            //    the table.
952
            // 3. The caller ensures that the control bytes of the `RawTableInner`
953
            //    are already initialized.
954
0
            self.table.reserve_rehash_inner(
955
0
                &self.alloc,
956
0
                additional,
957
0
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>, foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>::{closure#0}
958
0
                fallibility,
959
                Self::TABLE_LAYOUT,
960
0
                if T::NEEDS_DROP {
961
0
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>::{closure#1}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>::{closure#1}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>::{closure#1}
962
                } else {
963
0
                    None
964
                },
965
            )
966
        }
967
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::reserve_rehash::<hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>
968
969
    /// Allocates a new table of a different size and moves the contents of the
970
    /// current table into it.
971
    ///
972
    /// # Safety
973
    ///
974
    /// The [`RawTableInner`] must have properly initialized control bytes,
975
    /// otherwise calling this function results in [`undefined behavior`]
976
    ///
977
    /// The caller of this function must ensure that `capacity >= self.table.items`
978
    /// otherwise:
979
    ///
980
    /// * If `self.table.items != 0`, calling this function with `capacity`
981
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
982
    ///
983
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
984
    ///   `self.table.items > capacity_to_buckets(capacity)`
985
    ///   calling this function results in [`undefined behavior`].
986
    ///
987
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
988
    ///   `self.table.items > capacity_to_buckets(capacity)`
989
    ///   calling this function will never return (it will go into an
990
    ///   infinite loop).
991
    ///
992
    /// See [`RawTableInner::find_insert_slot`] for more information.
993
    ///
994
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
995
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
996
0
    unsafe fn resize(
997
0
        &mut self,
998
0
        capacity: usize,
999
0
        hasher: impl Fn(&T) -> u64,
1000
0
        fallibility: Fallibility,
1001
0
    ) -> Result<(), TryReserveError> {
1002
        // SAFETY:
1003
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1004
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1005
        //    [`TableLayout`] that were used to allocate this table.
1006
        // 3. The caller ensures that the control bytes of the `RawTableInner`
1007
        //    are already initialized.
1008
0
        self.table.resize_inner(
1009
0
            &self.alloc,
1010
0
            capacity,
1011
0
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1012
0
            fallibility,
1013
            Self::TABLE_LAYOUT,
1014
        )
1015
0
    }
1016
1017
    /// Inserts a new element into the table, and returns its raw bucket.
1018
    ///
1019
    /// This does not check if the given element already exists in the table.
1020
    #[cfg_attr(feature = "inline-more", inline)]
1021
0
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1022
        unsafe {
1023
            // SAFETY:
1024
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1025
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1026
            //
1027
            // 2. We reserve additional space (if necessary) right after calling this function.
1028
0
            let mut slot = self.table.find_insert_slot(hash);
1029
1030
            // We can avoid growing the table once we have reached our load factor if we are replacing
1031
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1032
            //
1033
            // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
1034
            // in the range `0..=self.buckets()`.
1035
0
            let old_ctrl = *self.table.ctrl(slot.index);
1036
0
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
1037
0
                self.reserve(1, hasher);
1038
0
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1039
0
                // initialized and that there is extra space in the table.
1040
0
                slot = self.table.find_insert_slot(hash);
1041
0
            }
1042
1043
0
            self.insert_in_slot(hash, slot, value)
1044
        }
1045
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert::<indexmap::map::core::get_hash<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert::<indexmap::map::core::get_hash<serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert::<_>
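The tombstone shortcut in `insert` works because writing over a DELETED slot leaves the number of EMPTY slots, and therefore the load-factor budget, unchanged. A toy model of that bookkeeping (illustrative enum and helper, not hashbrown's actual `Tag` type):

// Toy control byte: growth_left shrinks only when an EMPTY slot is
// consumed, mirroring the `old_ctrl.special_is_empty()` check above.
#[derive(Clone, Copy, PartialEq)]
enum Ctrl {
    Empty,
    Deleted,
    Full,
}

fn consume_slot(ctrl: &mut Ctrl, growth_left: &mut usize) {
    if *ctrl == Ctrl::Empty {
        *growth_left -= 1;
    }
    *ctrl = Ctrl::Full;
}

fn main() {
    let mut growth_left = 3;
    let (mut empty, mut tombstone) = (Ctrl::Empty, Ctrl::Deleted);
    consume_slot(&mut empty, &mut growth_left); // budget: 3 -> 2
    consume_slot(&mut tombstone, &mut growth_left); // tombstone reuse: stays 2
    assert_eq!(growth_left, 2);
}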
1046
1047
    /// Inserts a new element into the table, and returns a mutable reference to it.
1048
    ///
1049
    /// This does not check if the given element already exists in the table.
1050
    #[cfg_attr(feature = "inline-more", inline)]
1051
0
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1052
0
        unsafe { self.insert(hash, value, hasher).as_mut() }
1053
0
    }
1054
1055
    /// Inserts a new element into the table, without growing the table.
1056
    ///
1057
    /// There must be enough space in the table to insert the new element.
1058
    ///
1059
    /// This does not check if the given element already exists in the table.
1060
    #[cfg_attr(feature = "inline-more", inline)]
1061
    #[cfg(feature = "rustc-internal-api")]
1062
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1063
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1064
        let bucket = self.table.bucket(index);
1065
1066
        // If we are replacing a DELETED entry then we don't need to update
1067
        // the load counter.
1068
        self.table.growth_left -= old_ctrl.special_is_empty() as usize;
1069
1070
        bucket.write(value);
1071
        self.table.items += 1;
1072
        bucket
1073
    }
1074
1075
    /// Temporarily removes a bucket, applying the given function to the removed
1076
    /// element and optionally putting the returned value back in the same bucket.
1077
    ///
1078
    /// Returns `true` if the bucket still contains an element
1079
    ///
1080
    /// This does not check if the given bucket is actually occupied.
1081
    #[cfg_attr(feature = "inline-more", inline)]
1082
0
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1083
0
    where
1084
0
        F: FnOnce(T) -> Option<T>,
1085
    {
1086
0
        let index = self.bucket_index(&bucket);
1087
0
        let old_ctrl = *self.table.ctrl(index);
1088
0
        debug_assert!(self.is_bucket_full(index));
1089
0
        let old_growth_left = self.table.growth_left;
1090
0
        let item = self.remove(bucket).0;
1091
0
        if let Some(new_item) = f(item) {
1092
0
            self.table.growth_left = old_growth_left;
1093
0
            self.table.set_ctrl(index, old_ctrl);
1094
0
            self.table.items += 1;
1095
0
            self.bucket(index).write(new_item);
1096
0
            true
1097
        } else {
1098
0
            false
1099
        }
1100
0
    }
1101
1102
    /// Searches for an element in the table. If the element is not found,
1103
    /// returns `Err` with the position of a slot where an element with the
1104
    /// same hash could be inserted.
1105
    ///
1106
    /// This function may resize the table if additional space is required for
1107
    /// inserting an element.
1108
    #[inline]
1109
0
    pub fn find_or_find_insert_slot(
1110
0
        &mut self,
1111
0
        hash: u64,
1112
0
        mut eq: impl FnMut(&T) -> bool,
1113
0
        hasher: impl Fn(&T) -> u64,
1114
0
    ) -> Result<Bucket<T>, InsertSlot> {
1115
0
        self.reserve(1, hasher);
1116
1117
        unsafe {
1118
            // SAFETY:
1119
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1120
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1121
            //    never expose `RawTable::new_uninitialized` in a public API.
1122
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1123
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1124
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1125
0
            match self
1126
0
                .table
1127
0
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>, foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}, hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)>::{closure#0}, hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}, indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_slot::<_, _>::{closure#0}
1128
            {
1129
                // SAFETY: See explanation above.
1130
0
                Ok(index) => Ok(self.bucket(index)),
1131
0
                Err(slot) => Err(slot),
1132
            }
1133
        }
1134
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}, hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)>::{closure#0}, hashbrown::map::make_hasher<ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns), foldhash::seed::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}, indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_slot::<_, _>
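This find-or-hand-back-a-slot shape is what the map layer's entry API is built on: hash and probe once, then either use the found bucket or fill the reserved slot. A public-level analogue using `std::collections::HashMap::entry` (the raw two-step form shown here is crate-internal):

use std::collections::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for word in ["a", "b", "a"] {
        // Finds the existing entry or returns the vacant slot, so each
        // key is hashed and probed only once per iteration.
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts["a"], 2);
}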
1135
1136
    /// Inserts a new element into the table in the given slot, and returns its
1137
    /// raw bucket.
1138
    ///
1139
    /// # Safety
1140
    ///
1141
    /// `slot` must point to a slot previously returned by
1142
    /// `find_or_find_insert_slot`, and no mutation of the table must have
1143
    /// occurred since that call.
1144
    #[inline]
1145
0
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1146
0
        let old_ctrl = *self.table.ctrl(slot.index);
1147
0
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1148
1149
0
        let bucket = self.bucket(slot.index);
1150
0
        bucket.write(value);
1151
0
        bucket
1152
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert_in_slot
1153
1154
    /// Searches for an element in the table.
1155
    #[inline]
1156
0
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1157
        unsafe {
1158
            // SAFETY:
1159
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1160
            //    will never expose `RawTable::new_uninitialized` in a public API.
1161
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1162
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1163
            //    is safe.
1164
0
            let result = self
1165
0
                .table
1166
0
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::find::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::find::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex, ztunnel::identity::manager::Identity>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::mapping::HashLikeValue>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>::{closure#0}
1167
1168
            // Avoid `Option::map` because it bloats LLVM IR.
1169
0
            match result {
1170
                // SAFETY: See explanation above.
1171
0
                Some(index) => Some(self.bucket(index)),
1172
0
                None => None,
1173
            }
1174
        }
1175
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::find::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::find::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex, ztunnel::identity::manager::Identity>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::mapping::HashLikeValue>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>
1176
1177
    /// Gets a reference to an element in the table.
1178
    #[inline]
1179
0
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1180
        // Avoid `Option::map` because it bloats LLVM IR.
1181
0
        match self.find(hash, eq) {
1182
0
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1183
0
            None => None,
1184
        }
1185
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::get::<hashbrown::map::equivalent_key<ztunnel::inpod::WorkloadUid, ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get::<indexmap::map::core::equivalent<ztunnel::identity::manager::Identity, keyed_priority_queue::editable_binary_heap::HeapIndex, ztunnel::identity::manager::Identity>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::value::Value>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get::<indexmap::map::core::equivalent<serde_yaml::value::Value, serde_yaml::value::Value, serde_yaml::mapping::HashLikeValue>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::get::<_>
1186
1187
    /// Gets a mutable reference to an element in the table.
1188
    #[inline]
1189
0
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1190
        // Avoid `Option::map` because it bloats LLVM IR.
1191
0
        match self.find(hash, eq) {
1192
0
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1193
0
            None => None,
1194
        }
1195
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<i32>, lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get_mut::<indexmap::map::core::update_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::get_mut::<indexmap::map::core::update_index::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::get_mut::<_>
1196
1197
    /// Attempts to get mutable references to `N` entries in the table at once.
1198
    ///
1199
    /// Returns an array of length `N` with the results of each query.
1200
    ///
1201
    /// At most one mutable reference will be returned to any entry. Panics if any two of the
1202
    /// hashes resolve to the same entry; `None` is returned for any hash that is not found.
1203
    ///
1204
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1205
    /// the `i`th key to be looked up.
1206
0
    pub fn get_many_mut<const N: usize>(
1207
0
        &mut self,
1208
0
        hashes: [u64; N],
1209
0
        eq: impl FnMut(usize, &T) -> bool,
1210
0
    ) -> [Option<&'_ mut T>; N] {
1211
        unsafe {
1212
0
            let ptrs = self.get_many_mut_pointers(hashes, eq);
1213
1214
0
            for (i, cur) in ptrs.iter().enumerate() {
1215
0
                if cur.is_some() && ptrs[..i].contains(cur) {
1216
0
                    panic!("duplicate keys found");
1217
0
                }
1218
            }
1219
            // All buckets are distinct from all previous buckets so we're clear to return the result
1220
            // of the lookup.
1221
1222
0
            ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1223
        }
1224
0
    }
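The loop above rejects aliasing by comparing the found buckets as raw pointers before any mutable reference is produced. The same idea over a plain slice, as a self-contained sketch (hypothetical helper, not part of the crate):

// Hands out two mutable references, refusing duplicates up front so the
// references can never alias, in the spirit of the check above.
fn get_two_mut<T>(slice: &mut [T], i: usize, j: usize) -> (&mut T, &mut T) {
    assert!(i != j, "duplicate keys found");
    assert!(i < slice.len() && j < slice.len());
    // SAFETY: both indices are in bounds and distinct.
    unsafe {
        let base = slice.as_mut_ptr();
        (&mut *base.add(i), &mut *base.add(j))
    }
}

fn main() {
    let mut data = [10, 20, 30];
    let (a, b) = get_two_mut(&mut data, 0, 2);
    *a += 1;
    *b += 1;
    assert_eq!(data, [11, 20, 31]);
}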
1225
1226
0
    pub unsafe fn get_many_unchecked_mut<const N: usize>(
1227
0
        &mut self,
1228
0
        hashes: [u64; N],
1229
0
        eq: impl FnMut(usize, &T) -> bool,
1230
0
    ) -> [Option<&'_ mut T>; N] {
1231
0
        let ptrs = self.get_many_mut_pointers(hashes, eq);
1232
0
        ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1233
0
    }
1234
1235
0
    unsafe fn get_many_mut_pointers<const N: usize>(
1236
0
        &mut self,
1237
0
        hashes: [u64; N],
1238
0
        mut eq: impl FnMut(usize, &T) -> bool,
1239
0
    ) -> [Option<NonNull<T>>; N] {
1240
0
        array::from_fn(|i| {
1241
0
            self.find(hashes[i], |k| eq(i, k))
1242
0
                .map(|cur| cur.as_non_null())
1243
0
        })
1244
0
    }
1245
1246
    /// Returns the number of elements the map can hold without reallocating.
1247
    ///
1248
    /// This number is a lower bound; the table might be able to hold
1249
    /// more, but is guaranteed to be able to hold at least this many.
1250
    #[inline]
1251
0
    pub fn capacity(&self) -> usize {
1252
0
        self.table.items + self.table.growth_left
1253
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::capacity
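`capacity` is just `items + growth_left`, so it reports the load-factor headroom rather than the bucket count; from the outside the only guarantees are `capacity() >= len()` and that a requested capacity may be rounded up. For instance:

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::with_capacity(10);
    // The request may be rounded up to the next internal bucket count,
    // so only a lower bound of 10 is guaranteed.
    assert!(map.capacity() >= 10);
    map.insert(1, 1);
    assert!(map.capacity() >= map.len());
}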
1254
1255
    /// Returns the number of elements in the table.
1256
    #[inline]
1257
0
    pub fn len(&self) -> usize {
1258
0
        self.table.items
1259
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::len
1260
1261
    /// Returns `true` if the table contains no elements.
1262
    #[inline]
1263
0
    pub fn is_empty(&self) -> bool {
1264
0
        self.len() == 0
1265
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::is_empty
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::is_empty
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::is_empty
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::is_empty
1266
1267
    /// Returns the number of buckets in the table.
1268
    #[inline]
1269
0
    pub fn buckets(&self) -> usize {
1270
0
        self.table.bucket_mask + 1
1271
0
    }
1272
1273
    /// Checks whether the bucket at `index` is full.
1274
    ///
1275
    /// # Safety
1276
    ///
1277
    /// The caller must ensure `index` is less than the number of buckets.
1278
    #[inline]
1279
0
    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1280
0
        self.table.is_bucket_full(index)
1281
0
    }
1282
1283
    /// Returns an iterator over every element in the table. It is up to
1284
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1285
    /// Because we cannot make the `next` method unsafe on the `RawIter`
1286
    /// struct, we have to make the `iter` method unsafe.
1287
    #[inline]
1288
0
    pub unsafe fn iter(&self) -> RawIter<T> {
1289
        // SAFETY:
1290
        // 1. The caller must uphold the safety contract for `iter` method.
1291
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1292
        //    we will never expose RawTable::new_uninitialized in a public API.
1293
0
        self.table.iter()
1294
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::iter
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::iter
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::iter
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::iter
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::iter
1295
1296
    /// Returns an iterator over occupied buckets that could match a given hash.
1297
    ///
1298
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1299
    /// return items that have a hash value different than the one provided. You
1300
    /// should always validate the returned values before using them.
1301
    ///
1302
    /// It is up to the caller to ensure that the `RawTable` outlives the
1303
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1304
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1305
    #[cfg_attr(feature = "inline-more", inline)]
1306
0
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1307
0
        RawIterHash::new(self, hash)
1308
0
    }
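Only 7 bits of the hash survive in the control byte, so two keys with different full hashes can share a tag and `iter_hash` can yield false positives; hence the instruction above to re-validate every returned item. A toy 7-bit tag illustrating such a collision (assumption: the real derivation lives in the `control` module and may differ in detail):

// Hypothetical tag: keep only the top 7 bits of the 64-bit hash.
fn tag(hash: u64) -> u8 {
    (hash >> 57) as u8
}

fn main() {
    let a = 0xAB00_0000_0000_0001_u64;
    let b = 0xAB00_0000_0000_0002_u64;
    assert_ne!(a, b);
    // Different hashes, same 7-bit tag: a tag match is not proof of equality.
    assert_eq!(tag(a), tag(b));
}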
1309
1310
    /// Returns an iterator which removes all elements from the table without
1311
    /// freeing the memory.
1312
    #[cfg_attr(feature = "inline-more", inline)]
1313
0
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1314
        unsafe {
1315
0
            let iter = self.iter();
1316
0
            self.drain_iter_from(iter)
1317
        }
1318
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::drain
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::drain
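`drain` removes every element but keeps the allocation, which is the point of using it instead of rebuilding the table when it is about to be refilled. Sketch with the analogous std method:

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::new();
    map.extend((0..50).map(|i| (i, i)));
    let cap_before = map.capacity();

    // Empties the map while retaining the backing memory for reuse.
    let drained: Vec<(u32, u32)> = map.drain().collect();
    assert_eq!(drained.len(), 50);
    assert!(map.is_empty());
    assert!(map.capacity() >= cap_before);
}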
1319
1320
    /// Returns an iterator which removes all elements from the table without
1321
    /// freeing the memory.
1322
    ///
1323
    /// Iteration starts at the provided iterator's current location.
1324
    ///
1325
    /// It is up to the caller to ensure that the iterator is valid for this
1326
    /// `RawTable` and covers all items that remain in the table.
1327
    #[cfg_attr(feature = "inline-more", inline)]
1328
0
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1329
0
        debug_assert_eq!(iter.len(), self.len());
1330
0
        RawDrain {
1331
0
            iter,
1332
0
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1333
0
            orig_table: NonNull::from(&mut self.table),
1334
0
            marker: PhantomData,
1335
0
        }
1336
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::drain_iter_from
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::drain_iter_from
1337
1338
    /// Returns an iterator which consumes all elements from the table.
1339
    ///
1340
    /// Iteration starts at the provided iterator's current location.
1341
    ///
1342
    /// It is up to the caller to ensure that the iterator is valid for this
1343
    /// `RawTable` and covers all items that remain in the table.
1344
0
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1345
0
        debug_assert_eq!(iter.len(), self.len());
1346
1347
0
        let allocation = self.into_allocation();
1348
0
        RawIntoIter {
1349
0
            iter,
1350
0
            allocation,
1351
0
            marker: PhantomData,
1352
0
        }
1353
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::into_iter_from
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::into_iter_from
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::into_iter_from
1354
1355
    /// Converts the table into a raw allocation. The contents of the table
1356
    /// should be dropped using a `RawIter` before freeing the allocation.
1357
    #[cfg_attr(feature = "inline-more", inline)]
1358
0
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1359
0
        let alloc = if self.table.is_empty_singleton() {
1360
0
            None
1361
        } else {
1362
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1363
0
            let (layout, ctrl_offset) =
1364
0
                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1365
0
                    Some(lco) => lco,
1366
0
                    None => unsafe { hint::unreachable_unchecked() },
1367
                };
1368
0
            Some((
1369
0
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) },
1370
0
                layout,
1371
0
                unsafe { ptr::read(&self.alloc) },
1372
0
            ))
1373
        };
1374
0
        mem::forget(self);
1375
0
        alloc
1376
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::into_allocation
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::into_allocation
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::into_allocation
1377
}
1378
1379
unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1380
where
1381
    T: Send,
1382
    A: Send,
1383
{
1384
}
1385
unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1386
where
1387
    T: Sync,
1388
    A: Sync,
1389
{
1390
}
1391
1392
impl RawTableInner {
1393
    const NEW: Self = RawTableInner::new();
1394
1395
    /// Creates a new empty hash table without allocating any memory.
1396
    ///
1397
    /// In effect this returns a table with exactly 1 bucket. However we can
1398
    /// leave the data pointer dangling since that bucket is never accessed
1399
    /// due to our load factor forcing us to always have at least 1 free bucket.
1400
    #[inline]
1401
0
    const fn new() -> Self {
1402
0
        Self {
1403
0
            // Be careful to cast the entire slice to a raw pointer.
1404
0
            ctrl: unsafe {
1405
0
                NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast())
1406
0
            },
1407
0
            bucket_mask: 0,
1408
0
            items: 0,
1409
0
            growth_left: 0,
1410
0
        }
1411
0
    }
1412
}
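Because `new` points the control bytes at a static empty group and sets `growth_left` to zero, an empty table costs nothing to construct and the first insertion triggers the initial allocation. The effect is observable from the public side:

use std::collections::HashMap;

fn main() {
    // A freshly constructed map has no buckets allocated yet.
    let map: HashMap<u64, u64> = HashMap::new();
    assert_eq!(map.capacity(), 0);
    assert_eq!(map.len(), 0);
}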
1413
1414
impl RawTableInner {
1415
    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1416
    /// The control bytes and buckets are left uninitialized.
1417
    ///
1418
    /// # Safety
1419
    ///
1420
    /// The caller of this function must ensure that `buckets` is a power of two
1421
    /// and also initialize all control bytes of the length `self.bucket_mask + 1 +
1422
    /// Group::WIDTH` with the [`Tag::EMPTY`] bytes.
1423
    ///
1424
    /// See also [`Allocator`] API for other safety concerns.
1425
    ///
1426
    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1427
    #[cfg_attr(feature = "inline-more", inline)]
1428
0
    unsafe fn new_uninitialized<A>(
1429
0
        alloc: &A,
1430
0
        table_layout: TableLayout,
1431
0
        buckets: usize,
1432
0
        fallibility: Fallibility,
1433
0
    ) -> Result<Self, TryReserveError>
1434
0
    where
1435
0
        A: Allocator,
1436
    {
1437
0
        debug_assert!(buckets.is_power_of_two());
1438
1439
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1440
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1441
0
            Some(lco) => lco,
1442
0
            None => return Err(fallibility.capacity_overflow()),
1443
        };
1444
1445
0
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1446
0
            Ok(block) => block.cast(),
1447
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1448
        };
1449
1450
        // SAFETY: null pointer will be caught in above check
1451
0
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1452
0
        Ok(Self {
1453
0
            ctrl,
1454
0
            bucket_mask: buckets - 1,
1455
0
            items: 0,
1456
0
            growth_left: bucket_mask_to_capacity(buckets - 1),
1457
0
        })
1458
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<_>
1459
1460
    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1461
    /// capacity for inserting the given number of elements without reallocating.
1462
    ///
1463
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1464
    #[inline]
1465
0
    fn fallible_with_capacity<A>(
1466
0
        alloc: &A,
1467
0
        table_layout: TableLayout,
1468
0
        capacity: usize,
1469
0
        fallibility: Fallibility,
1470
0
    ) -> Result<Self, TryReserveError>
1471
0
    where
1472
0
        A: Allocator,
1473
    {
1474
0
        if capacity == 0 {
1475
0
            Ok(Self::NEW)
1476
        } else {
1477
            // SAFETY: We checked that we could successfully allocate the new table, and then
1478
            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1479
            unsafe {
1480
0
                let buckets =
1481
0
                    capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<allocator_api2::stable::alloc::global::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<allocator_api2::stable::alloc::global::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>::{closure#0}
1482
1483
0
                let mut result =
1484
0
                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1485
                // SAFETY: We checked that the table is allocated and therefore the table already has
1486
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1487
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1488
0
                result.ctrl_slice().fill_empty();
1489
1490
0
                Ok(result)
1491
            }
1492
        }
1493
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>
1494
1495
    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1496
    /// the given number of elements without reallocating.
1497
    ///
1498
    /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`]s the program
1499
    /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to
1500
    /// handle memory allocation failure.
1501
    ///
1502
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1503
    ///
1504
    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1505
    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1506
0
    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1507
0
    where
1508
0
        A: Allocator,
1509
    {
1510
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1511
0
        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1512
0
            Ok(table_inner) => table_inner,
1513
            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1514
0
            Err(_) => unsafe { hint::unreachable_unchecked() },
1515
        }
1516
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<_>
1517
1518
    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1519
    ///
1520
    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1521
    /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately
1522
    /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function. This is because
1523
    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1524
    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1525
    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1526
    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1527
    /// trailing control bytes (containing [`Tag::EMPTY`] bytes).
1528
    ///
1529
    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1530
    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1531
    /// `Safety`).
1532
    ///
1533
    /// # Warning
1534
    ///
1535
    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1536
    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1537
    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1538
    /// index will cause immediate [`undefined behavior`].
1539
    ///
1540
    /// # Safety
1541
    ///
1542
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1543
    /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
1544
    /// of this crate, the following rules are necessary and sufficient:
1545
    ///
1546
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1547
    ///   function results in [`undefined behavior`].
1548
    ///
1549
    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1550
    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
1551
    ///
1552
    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1553
    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1554
    ///
1555
    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1556
    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1557
    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1558
    ///
1559
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1560
    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1561
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1562
    #[inline]
1563
0
    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1564
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1565
0
        if unlikely(self.is_bucket_full(index)) {
1566
0
            debug_assert!(self.bucket_mask < Group::WIDTH);
1567
            // SAFETY:
1568
            //
1569
            // * Since the caller of this function ensures that the control bytes are properly
1570
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1571
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1572
            //   and points to the properly initialized control bytes (see also
1573
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1574
            //
1575
            // * Because the caller of this function ensures that the index was provided by the
1576
            //   `self.find_insert_slot_in_group()` function, so for tables larger than the
1577
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1578
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1579
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1580
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1581
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1582
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1583
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1584
0
            index = Group::load_aligned(self.ctrl(0))
1585
0
                .match_empty_or_deleted()
1586
0
                .lowest_set_bit()
1587
0
                .unwrap_unchecked();
1588
0
        }
1589
0
        InsertSlot { index }
1590
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
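A standalone sketch can make the false positive that `fix_insert_slot` repairs concrete. The group width of 4, the tag values, and the two-bucket layout below are illustrative assumptions of this sketch, not hashbrown's actual constants:

// Sketch only: reproduce the aliasing that the rescan above fixes.
fn main() {
    const GROUP_WIDTH: usize = 4; // illustrative; the real width depends on SIMD support
    const EMPTY: u8 = 0xFF;       // illustrative stand-in for Tag::EMPTY
    let buckets = 2usize;
    let bucket_mask = buckets - 1;
    // Two FULL slots, two trailing EMPTY bytes, then replicas of the first two
    // bytes (total length = buckets + GROUP_WIDTH).
    let ctrl = [0x01u8, 0x02, EMPTY, EMPTY, 0x01, 0x02];
    // A group loaded at probe position 0 finds its first EMPTY byte at offset 2.
    let probe_pos = 0usize;
    let bit = ctrl[..GROUP_WIDTH].iter().position(|&t| t == EMPTY).unwrap();
    let index = (probe_pos + bit) & bucket_mask; // masking wraps 2 to 0
    assert_eq!(index, 0);
    assert_ne!(ctrl[index], EMPTY); // index 0 is FULL: the case the rescan fixes
    println!("masked index {index} aliases a full bucket; a rescan from 0 is required");
}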
1591
1592
    /// Finds the position to insert something in a group.
1593
    ///
1594
    /// **This may have false positives and must be fixed up with `fix_insert_slot`
1595
    /// before it's used.**
1596
    ///
1597
    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1598
    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1599
    #[inline]
1600
0
    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1601
0
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1602
1603
0
        if likely(bit.is_some()) {
1604
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1605
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1606
0
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1607
        } else {
1608
0
            None
1609
        }
1610
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
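The mask-versus-modulo equivalence relied on above is easy to check in isolation; the bucket count below is a hypothetical power of two:

// Sketch only: masking with `buckets - 1` equals `% buckets` for powers of two.
fn main() {
    let buckets = 8usize; // hypothetical power-of-two bucket count
    let bucket_mask = buckets - 1;
    for pos in 0..buckets {
        for bit in 0..16 {
            assert_eq!((pos + bit) & bucket_mask, (pos + bit) % buckets);
        }
    }
    println!("masking matches modulo for a power-of-two bucket count");
}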
1611
1612
    /// Searches for an element in the table, or a potential slot where that element could
1613
    /// be inserted (an empty or deleted [`Bucket`] index).
1614
    ///
1615
    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1616
    /// eliminated by LLVM optimizations.
1617
    ///
1618
    /// This function does not make any changes to the `data` part of the table, or any
1619
    /// changes to the `items` or `growth_left` field of the table.
1620
    ///
1621
    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1622
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1623
    /// will never return (will go into an infinite loop) for tables larger than the group
1624
    /// width, or return an index outside of the table indices range if the table is less
1625
    /// than the group width.
1626
    ///
1627
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1628
    /// function with only `FULL` buckets' indices and return the `index` of the found
1629
    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1630
    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1631
    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
1632
    /// if this function returns [`InsertSlot`], it will contain an index in the range
1633
    /// `0..=self.buckets()`.
1634
    ///
1635
    /// # Safety
1636
    ///
1637
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1638
    /// this function results in [`undefined behavior`].
1639
    ///
1640
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1641
    /// less than the group width and if there was not at least one empty or deleted bucket in
1642
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1643
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1644
    /// control bytes outside the table range.
1645
    ///
1646
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1647
    #[inline]
1648
0
    unsafe fn find_or_find_insert_slot_inner(
1649
0
        &self,
1650
0
        hash: u64,
1651
0
        eq: &mut dyn FnMut(usize) -> bool,
1652
0
    ) -> Result<usize, InsertSlot> {
1653
0
        let mut insert_slot = None;
1654
1655
0
        let tag_hash = Tag::full(hash);
1656
0
        let mut probe_seq = self.probe_seq(hash);
1657
1658
        loop {
1659
            // SAFETY:
1660
            // * Caller of this function ensures that the control bytes are properly initialized.
1661
            //
1662
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1663
            //   of the table due to masking with `self.bucket_mask` and also because the number
1664
            //   of buckets is a power of two (see `self.probe_seq` function).
1665
            //
1666
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1667
            //   call `Group::load` due to the extended control bytes range, which is
1668
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1669
            //   byte will never be read for the allocated table);
1670
            //
1671
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1672
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1673
            //   bytes, which is safe (see RawTableInner::new).
1674
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1675
1676
0
            for bit in group.match_tag(tag_hash) {
1677
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1678
1679
0
                if likely(eq(index)) {
1680
0
                    return Ok(index);
1681
0
                }
1682
            }
1683
1684
            // We didn't find the element we were looking for in the group, try to get an
1685
            // insertion slot from the group if we don't have one yet.
1686
0
            if likely(insert_slot.is_none()) {
1687
0
                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1688
0
            }
1689
1690
            // Only stop the search if the group contains at least one empty element.
1691
            // Otherwise, the element that we are looking for might be in a following group.
1692
0
            if likely(group.match_empty().any_bit_set()) {
1693
                // We must have found an insert slot by now, since the current group contains at
1694
                // least one. For tables smaller than the group width, there will still be an
1695
                // empty element in the current (and only) group due to the load factor.
1696
                unsafe {
1697
                    // SAFETY:
1698
                    // * Caller of this function ensures that the control bytes are properly initialized.
1699
                    //
1700
                    // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1701
0
                    return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
1702
                }
1703
0
            }
1704
1705
0
            probe_seq.move_next(self.bucket_mask);
1706
        }
1707
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_slot_inner
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_slot_inner
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_slot_inner
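At the public API level this find-or-insert behaviour is what ultimately backs entry-style operations; a minimal user-level sketch that does not touch the raw API directly (it assumes the `hashbrown` crate as a dependency):

// User-level sketch only.
use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for word in ["a", "b", "a"] {
        // Internally, one probe either finds the existing key (Ok(index))
        // or hands back an insert slot (Err(InsertSlot)).
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts.get("a"), Some(&2));
}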
1708
1709
    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1710
    /// element and sets the hash for that slot. Returns an index of that slot and the
1711
    /// old control byte stored in the found index.
1712
    ///
1713
    /// This function does not check if the given element exists in the table. Also,
1714
    /// this function does not check if there is enough space in the table to insert
1715
    /// a new element. The caller of the function must make sure that the table has at
1716
    /// least 1 empty or deleted `bucket`, otherwise this function will never return
1717
    /// (will go into an infinite loop) for tables larger than the group width, or
1718
    /// return an index outside of the table indices range if the table is less than
1719
    /// the group width.
1720
    ///
1721
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1722
    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
1723
    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
1724
    ///
1725
    /// This function does not make any changes to the `data` parts of the table,
1726
    /// or any changes to the `items` or `growth_left` field of the table.
1727
    ///
1728
    /// # Safety
1729
    ///
1730
    /// The safety rules are directly derived from the safety rules for the
1731
    /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_slot`] methods.
1732
    /// Thus, in order to uphold the safety contracts for those methods, as well as for
1733
    /// this crate to work correctly, you must observe the following rules
1734
    /// when calling this function:
1735
    ///
1736
    /// * The [`RawTableInner`] has already been allocated and has properly initialized
1737
    ///   control bytes otherwise calling this function results in [`undefined behavior`].
1738
    ///
1739
    /// * The caller of this function must ensure that the "data" parts of the table
1740
    ///   will have an entry in the returned index (matching the given hash) right
1741
    ///   after calling this function.
1742
    ///
1743
    /// Attempting to write data at the `index` returned by this function when the table is
1744
    /// less than the group width and if there was not at least one empty or deleted bucket in
1745
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1746
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1747
    /// control bytes outside the table range.
1748
    ///
1749
    /// The caller must independently increase the `items` field of the table, and also,
1750
    /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left`
1751
    /// field, and do not change it if the old control byte was [`Tag::DELETED`].
1752
    ///
1753
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1754
    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
1755
    ///
1756
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1757
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1758
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1759
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
1760
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1761
    #[inline]
1762
0
    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, Tag) {
1763
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1764
0
        let index: usize = self.find_insert_slot(hash).index;
1765
        // SAFETY:
1766
        // 1. The `find_insert_slot` function either returns an `index` less than or
1767
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1768
        //    returns if it cannot find an empty or deleted slot.
1769
        // 2. The caller of this function guarantees that the table has already been
1770
        //    allocated
1771
0
        let old_ctrl = *self.ctrl(index);
1772
0
        self.set_ctrl_hash(index, hash);
1773
0
        (index, old_ctrl)
1774
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
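The bookkeeping rule spelled out above (decrement `growth_left` only when the old control byte was `Tag::EMPTY`, leave it alone for `Tag::DELETED`) can be sketched in plain Rust; the starting values are arbitrary:

// Sketch only: arbitrary starting values for `growth_left` and `items`.
fn main() {
    let mut growth_left = 7usize;
    let mut items = 0usize;

    // Insert into a slot whose old control byte was EMPTY: consumes budget.
    let old_ctrl_was_empty = true;
    growth_left -= usize::from(old_ctrl_was_empty);
    items += 1;

    // Insert into a reused DELETED tombstone: budget unchanged.
    let old_ctrl_was_empty = false;
    growth_left -= usize::from(old_ctrl_was_empty);
    items += 1;

    assert_eq!((growth_left, items), (6, 2));
    println!("growth_left = {growth_left}, items = {items}");
}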
1775
1776
    /// Searches for an empty or deleted bucket which is suitable for inserting
1777
    /// a new element, returning the `index` for the new [`Bucket`].
1778
    ///
1779
    /// This function does not make any changes to the `data` part of the table, or any
1780
    /// changes to the `items` or `growth_left` field of the table.
1781
    ///
1782
    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1783
    /// will never return (will go into an infinite loop) for tables larger than the group
1784
    /// width, or return an index outside of the table indices range if the table is less
1785
    /// than the group width.
1786
    ///
1787
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1788
    /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
1789
    /// but in any case, if this function returns [`InsertSlot`], it will contain an index
1790
    /// in the range `0..=self.buckets()`.
1791
    ///
1792
    /// # Safety
1793
    ///
1794
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1795
    /// this function results in [`undefined behavior`].
1796
    ///
1797
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1798
    /// less than the group width and if there was not at least one empty or deleted bucket in
1799
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1800
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1801
    /// control bytes outside the table range.
1802
    ///
1803
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1804
    #[inline]
1805
0
    unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1806
0
        let mut probe_seq = self.probe_seq(hash);
1807
        loop {
1808
            // SAFETY:
1809
            // * Caller of this function ensures that the control bytes are properly initialized.
1810
            //
1811
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1812
            //   of the table due to masking with `self.bucket_mask` and also because the number
1813
            //   of buckets is a power of two (see `self.probe_seq` function).
1814
            //
1815
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1816
            //   call `Group::load` due to the extended control bytes range, which is
1817
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1818
            //   byte will never be read for the allocated table);
1819
            //
1820
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1821
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1822
            //   bytes, which is safe (see RawTableInner::new).
1823
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1824
1825
0
            let index = self.find_insert_slot_in_group(&group, &probe_seq);
1826
0
            if likely(index.is_some()) {
1827
                // SAFETY:
1828
                // * Caller of this function ensures that the control bytes are properly initialized.
1829
                //
1830
                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1831
                unsafe {
1832
0
                    return self.fix_insert_slot(index.unwrap_unchecked());
1833
                }
1834
0
            }
1835
0
            probe_seq.move_next(self.bucket_mask);
1836
        }
1837
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
1838
1839
    /// Searches for an element in a table, returning the `index` of the found element.
1840
    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1841
    /// eliminated by LLVM optimizations.
1842
    ///
1843
    /// This function does not make any changes to the `data` part of the table, or any
1844
    /// changes to the `items` or `growth_left` field of the table.
1845
    ///
1846
    /// The table must have at least 1 empty `bucket`, otherwise, if the
1847
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
1848
    /// this function will also never return (will go into an infinite loop).
1849
    ///
1850
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1851
    /// function with only `FULL` buckets' indices and return the `index` of the found
1852
    /// element as `Some(index)`, so the index will always be in the range
1853
    /// `0..self.buckets()`.
1854
    ///
1855
    /// # Safety
1856
    ///
1857
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1858
    /// this function results in [`undefined behavior`].
1859
    ///
1860
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1861
    #[inline(always)]
1862
0
    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
1863
0
        let tag_hash = Tag::full(hash);
1864
0
        let mut probe_seq = self.probe_seq(hash);
1865
1866
        loop {
1867
            // SAFETY:
1868
            // * Caller of this function ensures that the control bytes are properly initialized.
1869
            //
1870
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1871
            //   of the table due to masking with `self.bucket_mask`.
1872
            //
1873
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1874
            //   call `Group::load` due to the extended control bytes range, which is
1875
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1876
            //   byte will never be read for the allocated table);
1877
            //
1878
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1879
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1880
            //   bytes, which is safe (see RawTableInner::new_in).
1881
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1882
1883
0
            for bit in group.match_tag(tag_hash) {
1884
                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1885
                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1886
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1887
1888
0
                if likely(eq(index)) {
1889
0
                    return Some(index);
1890
0
                }
1891
            }
1892
1893
0
            if likely(group.match_empty().any_bit_set()) {
1894
0
                return None;
1895
0
            }
1896
1897
0
            probe_seq.move_next(self.bucket_mask);
1898
        }
1899
0
    }
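A user-level sketch of the lookup path this probing loop backs; plain `get` calls on the public map type are enough to exercise it (assumes the `hashbrown` crate as a dependency):

// User-level sketch only.
use hashbrown::HashMap;

fn main() {
    let mut map = HashMap::new();
    map.insert("key", 42);
    assert_eq!(map.get("key"), Some(&42));
    // A miss stops at the first group that contains an EMPTY control byte.
    assert_eq!(map.get("missing"), None);
}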
1900
1901
    /// Prepares for rehashing data in place (that is, without allocating new memory).
1902
    /// Converts all `FULL` control bytes to `Tag::DELETED` and all `Tag::DELETED` control
1903
    /// bytes to `Tag::EMPTY`, i.e. performs the following conversion:
1904
    ///
1905
    /// - `Tag::EMPTY` control bytes   -> `Tag::EMPTY`;
1906
    /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`;
1907
    /// - `FULL` control bytes    -> `Tag::DELETED`.
1908
    ///
1909
    /// This function does not make any changes to the `data` parts of the table,
1910
    /// or any changes to the `items` or `growth_left` field of the table.
1911
    ///
1912
    /// # Safety
1913
    ///
1914
    /// You must observe the following safety rules when calling this function:
1915
    ///
1916
    /// * The [`RawTableInner`] has already been allocated;
1917
    ///
1918
    /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL`
1919
    ///   bytes when re-inserting them into their ideal position (which was impossible
1920
    ///   to do during the first insert due to tombstones). If the caller does not do
1921
    ///   this, then calling this function may result in a memory leak.
1922
    ///
1923
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1924
    ///   calling this function results in [`undefined behavior`].
1925
    ///
1926
    /// Calling this function on a table that has not been allocated results in
1927
    /// [`undefined behavior`].
1928
    ///
1929
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1930
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
1931
    ///
1932
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1933
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1934
    #[allow(clippy::mut_mut)]
1935
    #[inline]
1936
0
    unsafe fn prepare_rehash_in_place(&mut self) {
1937
        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1938
        // This effectively frees up all buckets containing a DELETED entry.
1939
        //
1940
        // SAFETY:
1941
        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1942
        // 2. Even if `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1943
        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1944
        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1945
        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1946
        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1947
0
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1948
0
            let group = Group::load_aligned(self.ctrl(i));
1949
0
            let group = group.convert_special_to_empty_and_full_to_deleted();
1950
0
            group.store_aligned(self.ctrl(i));
1951
0
        }
1952
1953
        // Fix up the trailing control bytes. See the comments in set_ctrl
1954
        // for the handling of tables smaller than the group width.
1955
        //
1956
        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1957
        // has already been allocated
1958
0
        if unlikely(self.buckets() < Group::WIDTH) {
1959
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1960
0
            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1961
0
            // `Group::WIDTH` is safe
1962
0
            self.ctrl(0)
1963
0
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1964
0
        } else {
1965
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
1966
0
            // control bytes,so copying `Group::WIDTH` bytes with offset equal
1967
0
            // to `self.buckets() == self.bucket_mask + 1` is safe
1968
0
            self.ctrl(0)
1969
0
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
1970
0
        }
1971
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
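A byte-level sketch of the conversion table described above. The tag encodings used here (top bit set for special tags) follow the usual SwissTable convention but are assumptions of this sketch, not a restatement of hashbrown's `Tag` internals:

// Sketch only: EMPTY -> EMPTY, DELETED -> EMPTY, FULL -> DELETED.
fn main() {
    const EMPTY: u8 = 0b1111_1111;
    const DELETED: u8 = 0b1000_0000;
    let convert = |tag: u8| if tag & 0x80 != 0 { EMPTY } else { DELETED };
    assert_eq!(convert(EMPTY), EMPTY);   // EMPTY stays EMPTY
    assert_eq!(convert(DELETED), EMPTY); // tombstones are freed
    assert_eq!(convert(0x2A), DELETED);  // a FULL tag becomes a tombstone to re-place
    println!("full buckets become tombstones that the rehash will re-insert");
}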
1972
1973
    /// Returns an iterator over every element in the table.
1974
    ///
1975
    /// # Safety
1976
    ///
1977
    /// If any of the following conditions are violated, the result
1978
    /// is [`undefined behavior`]:
1979
    ///
1980
    /// * The caller has to ensure that the `RawTableInner` outlives the
1981
    ///   `RawIter`. Because we cannot make the `next` method unsafe on
1982
    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
1983
    ///
1984
    /// * The [`RawTableInner`] must have properly initialized control bytes.
1985
    ///
1986
    /// The type `T` must be the actual type of the elements stored in the table,
1987
    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
1988
    ///
1989
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1990
    #[inline]
1991
0
    unsafe fn iter<T>(&self) -> RawIter<T> {
1992
        // SAFETY:
1993
        // 1. Since the caller of this function ensures that the control bytes
1994
        //    are properly initialized and `self.data_end()` points to the start
1995
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
1996
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
1997
        //    control bytes.
1998
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
1999
        //    equal to zero).
2000
        // 3. We pass the exact value of buckets of the table to the function.
2001
        //
2002
        //                         `ctrl` points here (to the start
2003
        //                         of the first control byte `CT0`)
2004
        //                          ∨
2005
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2006
        //                           \________  ________/
2007
        //                                    \/
2008
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2009
        //
2010
        // where: T0...T_n  - our stored data;
2011
        //        CT0...CT_n - control bytes or metadata for `data`.
2012
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2013
        //                        with loading `Group` bytes from the heap works properly, even if the result
2014
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2015
        //                        `RawTableInner::set_ctrl` function.
2016
        //
2017
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2018
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2019
0
        let data = Bucket::from_base_index(self.data_end(), 0);
2020
0
        RawIter {
2021
0
            // SAFETY: See explanation above
2022
0
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2023
0
            items: self.items,
2024
0
        }
2025
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::iter::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::iter::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::iter::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::iter::<_>
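A user-level sketch: iteration over the public map types is driven by this raw iterator and yields exactly the occupied buckets (assumes the `hashbrown` crate as a dependency):

// User-level sketch only.
use hashbrown::HashMap;

fn main() {
    let map: HashMap<u32, &str> = [(1, "one"), (2, "two")].into_iter().collect();
    let mut keys: Vec<u32> = map.iter().map(|(&k, _)| k).collect();
    keys.sort_unstable();
    assert_eq!(keys, vec![1, 2]);
    println!("iterated over {} occupied buckets", map.len());
}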
2026
2027
    /// Executes the destructors (if any) of the values stored in the table.
2028
    ///
2029
    /// # Note
2030
    ///
2031
    /// This function does not erase the control bytes of the table and does
2032
    /// not make any changes to the `items` or `growth_left` fields of the
2033
    /// table. If necessary, the caller of this function must manually set
2034
    /// up these table fields, for example using the [`clear_no_drop`] function.
2035
    ///
2036
    /// Be careful when calling this function, because the drop function of
2037
    /// the elements can panic, and this can leave the table in an inconsistent
2038
    /// state.
2039
    ///
2040
    /// # Safety
2041
    ///
2042
    /// The type `T` must be the actual type of the elements stored in the table,
2043
    /// otherwise calling this function may result in [`undefined behavior`].
2044
    ///
2045
    /// If `T` is a type that should be dropped and **the table is not empty**,
2046
    /// calling this function more than once results in [`undefined behavior`].
2047
    ///
2048
    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2049
    /// calling this function may result in [`undefined behavior`].
2050
    ///
2051
    /// It is safe to call this function on a table that has not been allocated,
2052
    /// on a table with uninitialized control bytes, and on a table with no actual
2053
    /// data but with `Full` control bytes if `self.items == 0`.
2054
    ///
2055
    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2056
    /// about properly removing or saving `element` from / into the [`RawTable`] /
2057
    /// [`RawTableInner`].
2058
    ///
2059
    /// [`Bucket::drop`]: Bucket::drop
2060
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2061
    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2062
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2063
0
    unsafe fn drop_elements<T>(&mut self) {
2064
        // Check that `self.items != 0`. Protects against the possibility
2065
        // of creating an iterator on a table with uninitialized control bytes.
2066
0
        if T::NEEDS_DROP && self.items != 0 {
2067
            // SAFETY: We know for sure that RawTableInner will outlive the
2068
            // returned `RawIter` iterator, and the caller of this function
2069
            // must uphold the safety contract for `drop_elements` method.
2070
0
            for item in self.iter::<T>() {
2071
0
                // SAFETY: The caller must uphold the safety contract for
2072
0
                // `drop_elements` method.
2073
0
                item.drop();
2074
0
            }
2075
0
        }
2076
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<_>
2077
2078
    /// Executes the destructors (if any) of the values stored in the table and then
2079
    /// deallocates the table.
2080
    ///
2081
    /// # Note
2082
    ///
2083
    /// Calling this function automatically makes invalid (dangling) all instances of
2084
    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2085
    ///
2086
    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2087
    /// fields of the table. If necessary, the caller of this function must manually set
2088
    /// up these table fields.
2089
    ///
2090
    /// # Safety
2091
    ///
2092
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2093
    ///
2094
    /// * Calling this function more than once;
2095
    ///
2096
    /// * The type `T` must be the actual type of the elements stored in the table.
2097
    ///
2098
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2099
    ///   to allocate this table.
2100
    ///
2101
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2102
    ///   was used to allocate this table.
2103
    ///
2104
    /// The caller of this function should pay attention to the possibility of the
2105
    /// elements' drop function panicking, because this:
2106
    ///
2107
    ///    * May leave the table in an inconsistent state;
2108
    ///
2109
    ///    * Memory is never deallocated, so a memory leak may occur.
2110
    ///
2111
    /// Attempting to use the `ctrl` field of the table (dereference) after calling this
2112
    /// function results in [`undefined behavior`].
2113
    ///
2114
    /// It is safe to call this function on a table that has not been allocated,
2115
    /// on a table with uninitialized control bytes, and on a table with no actual
2116
    /// data but with `Full` control bytes if `self.items == 0`.
2117
    ///
2118
    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2119
    /// for more information.
2120
    ///
2121
    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2122
    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2123
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2124
0
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2125
0
        if !self.is_empty_singleton() {
2126
0
            unsafe {
2127
0
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2128
0
                self.drop_elements::<T>();
2129
0
                // SAFETY:
2130
0
                // 1. We have checked that our table is allocated.
2131
0
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2132
0
                self.free_buckets(alloc, table_layout);
2133
0
            }
2134
0
        }
2135
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>), allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState), allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns)), allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<usize, allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<usize, allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<_, _>
2136
2137
    /// Returns a pointer to an element in the table (convenience for
2138
    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2139
    ///
2140
    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2141
    /// otherwise using it may result in [`undefined behavior`].
2142
    ///
2143
    /// # Safety
2144
    ///
2145
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2146
    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2147
    /// this function, the following safety rules must be observed:
2148
    ///
2149
    /// * The table must already be allocated;
2150
    ///
2151
    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2152
    ///   function, i.e. `(index + 1) <= self.buckets()`.
2153
    ///
2154
    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2155
    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2156
    ///
2157
    /// It is safe to call this function with index of zero (`index == 0`) on a table that has
2158
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2159
    ///
2160
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2161
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
2162
    /// `(index + 1) <= self.buckets()`.
2163
    ///
2164
    /// ```none
2165
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2166
    /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
2167
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2168
    ///
2169
    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2170
    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2171
    ///                  |
2172
    ///                  |               `base = table.data_end::<T>()` points here
2173
    ///                  |               (to the start of CT0 or to the end of T0)
2174
    ///                  v                 v
2175
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2176
    ///                     ^                                              \__________  __________/
2177
    ///        `table.bucket(3)` returns a pointer that points                        \/
2178
    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2179
    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2180
    ///
2181
    /// where: T0...T_n  - our stored data;
2182
    ///        CT0...CT_n - control bytes or metadata for `data`;
2183
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2184
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2185
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2186
    ///
2187
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2188
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2189
    /// ```
2190
    ///
2191
    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2192
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2193
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2194
    #[inline]
2195
0
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2196
0
        debug_assert_ne!(self.bucket_mask, 0);
2197
0
        debug_assert!(index < self.buckets());
2198
0
        Bucket::from_base_index(self.data_end(), index)
2199
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<_>
2200
2201
    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2202
    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2203
    ///
2204
    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2205
    /// otherwise using it may result in [`undefined behavior`].
2206
    ///
2207
    /// # Safety
2208
    ///
2209
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2210
    ///
2211
    /// * The table must already be allocated;
2212
    ///
2213
    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2214
    ///   function, i.e. `(index + 1) <= self.buckets()`;
2215
    ///
2216
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2217
    ///
2218
    /// ```none
2219
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2220
    /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
2221
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2222
    ///
2223
    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2224
    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2225
    ///                  |
2226
    ///                  |               `base = table.data_end::<u8>()` points here
2227
    ///                  |               (to the start of CT0 or to the end of T0)
2228
    ///                  v                 v
2229
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2230
    ///                                                                    \__________  __________/
2231
    ///                                                                               \/
2232
    ///                                                                    additional control bytes
2233
    ///                                                                     `m = Group::WIDTH - 1`
2234
    ///
2235
    /// where: T0...T_n  - our stored data;
2236
    ///        CT0...CT_n - control bytes or metadata for `data`;
2237
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2238
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2239
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2240
    ///
2241
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2242
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2243
    /// ```
2244
    ///
2245
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2246
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2247
    #[inline]
2248
0
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2249
0
        debug_assert_ne!(self.bucket_mask, 0);
2250
0
        debug_assert!(index < self.buckets());
2251
0
        let base: *mut u8 = self.data_end().as_ptr();
2252
0
        base.sub((index + 1) * size_of)
2253
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
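The backwards pointer arithmetic described above can be illustrated with plain numbers; the element size and the `data_end` address below are made up for the example:

// Sketch only: element `index` starts `(index + 1) * size_of::<T>()` bytes below `data_end`.
fn main() {
    let size_of_t = 8usize;   // e.g. a pair of u32s
    let data_end = 4096usize; // pretend address of the first control byte
    let bucket_start = |index: usize| data_end - (index + 1) * size_of_t;
    assert_eq!(bucket_start(0), 4088); // T0 sits immediately below the control bytes
    assert_eq!(bucket_start(3), 4064); // T3 is four elements below data_end
    println!("bucket 3 starts {} bytes below data_end", data_end - bucket_start(3));
}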
2254
2255
    /// Returns pointer to one past last `data` element in the table as viewed from
2256
    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2257
    ///
2258
    /// This function actually returns a pointer to the end of the `data element` at
2259
    /// index "0" (zero).
2260
    ///
2261
    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2262
    /// otherwise using it may result in [`undefined behavior`].
2263
    ///
2264
    /// # Note
2265
    ///
2266
    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2267
    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2268
    ///
2269
    /// ```none
2270
    ///                        `table.data_end::<T>()` returns pointer that points here
2271
    ///                        (to the end of `T0`)
2272
    ///                          ∨
2273
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2274
    ///                           \________  ________/
2275
    ///                                    \/
2276
    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2277
    ///
2278
    /// where: T0...T_n  - our stored data;
2279
    ///        CT0...CT_n - control bytes or metadata for `data`.
2280
    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2281
    ///                        with loading `Group` bytes from the heap works properly, even if the result
2282
    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2283
    ///                        `RawTableInner::set_ctrl` function.
2284
    ///
2285
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2286
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2287
    /// ```
2288
    ///
2289
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2290
    #[inline]
2291
0
    fn data_end<T>(&self) -> NonNull<T> {
2292
0
        self.ctrl.cast()
2293
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<u8>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<u8>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<_>
2294
2295
    /// Returns an iterator-like object for a probe sequence on the table.
2296
    ///
2297
    /// This iterator never terminates, but is guaranteed to visit each bucket
2298
    /// group exactly once. The loop using `probe_seq` must terminate upon
2299
    /// reaching a group containing an empty bucket.
2300
    #[inline]
2301
0
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2302
0
        ProbeSeq {
2303
0
            // This is the same as `hash as usize % self.buckets()` because the number
2304
0
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2305
0
            pos: h1(hash) & self.bucket_mask,
2306
0
            stride: 0,
2307
0
        }
2308
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
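A standalone sketch of the probe sequence's key property: each probe position is visited exactly once before the sequence repeats. The group width and bucket count are illustrative, and `move_next` is mimicked by the `stride`/`pos` updates:

// Sketch only: quadratic probing in group-sized steps over a power-of-two table.
use std::collections::HashSet;

fn main() {
    const GROUP_WIDTH: usize = 4;
    let buckets = 32usize; // power of two
    let bucket_mask = buckets - 1;
    let mut pos = 13 & bucket_mask; // as if `h1(hash) & bucket_mask`
    let mut stride = 0usize;
    let mut visited = HashSet::new();
    for _ in 0..(buckets / GROUP_WIDTH) {
        visited.insert(pos);
        // Mimics `ProbeSeq::move_next`: the stride grows by one group width per step.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
    // Every probe position is distinct before the sequence starts repeating.
    assert_eq!(visited.len(), buckets / GROUP_WIDTH);
    println!("visited {} distinct probe positions", visited.len());
}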
2309
2310
    #[inline]
2311
0
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, hash: u64) {
2312
0
        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2313
0
        self.set_ctrl_hash(index, hash);
2314
0
        self.items += 1;
2315
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
2316
2317
    #[inline]
2318
0
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2319
0
        let probe_seq_pos = self.probe_seq(hash).pos;
2320
0
        let probe_index =
2321
0
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
2322
0
        probe_index(i) == probe_index(new_i)
2323
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
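A sketch of the group-index computation above: two bucket indices belong to the same group when they are the same number of whole groups away from the probe start. The group width, bucket count, and probe start are illustrative constants:

// Sketch only.
fn main() {
    const GROUP_WIDTH: usize = 4;
    let bucket_mask = 15usize; // 16 buckets
    let probe_start = 6usize;  // as if `self.probe_seq(hash).pos`
    let group_of = |i: usize| (i.wrapping_sub(probe_start) & bucket_mask) / GROUP_WIDTH;
    assert_eq!(group_of(6), group_of(9));  // offsets 0 and 3 from the start: same group
    assert_ne!(group_of(6), group_of(10)); // offset 4: the next group
    println!("indices 6 and 9 probe within the same group; 10 does not");
}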
2324
2325
    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2326
    /// the end of the array.
2327
    ///
2328
    /// This function does not make any changes to the `data` parts of the table,
2329
    /// or any changes to the `items` or `growth_left` field of the table.
2330
    ///
2331
    /// # Safety
2332
    ///
2333
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2334
    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2335
    /// following rules when calling this function:
2336
    ///
2337
    /// * The [`RawTableInner`] has already been allocated;
2338
    ///
2339
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2340
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2341
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2342
    ///
2343
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2344
    ///
2345
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
2346
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2347
    ///
2348
    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2349
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2350
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2351
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2352
    #[inline]
2353
0
    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2354
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2355
0
        self.set_ctrl(index, Tag::full(hash));
2356
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
2357
2358
    /// Replaces the hash in the control byte at the given index with the provided one,
2359
    /// and possibly also replicates the new control byte at the end of the array of control
2360
    /// bytes, returning the old control byte.
2361
    ///
2362
    /// This function does not make any changes to the `data` parts of the table,
2363
    /// or any changes to the `items` or `growth_left` field of the table.
2364
    ///
2365
    /// # Safety
2366
    ///
2367
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`]
2368
    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2369
    /// methods, you must observe the following rules when calling this function:
2370
    ///
2371
    /// * The [`RawTableInner`] has already been allocated;
2372
    ///
2373
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2374
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2375
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2376
    ///
2377
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2378
    ///
2379
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
2380
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2381
    ///
2382
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
2383
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2384
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2385
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2386
    #[inline]
2387
0
    unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag {
2388
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`]
2389
0
        let prev_ctrl = *self.ctrl(index);
2390
0
        self.set_ctrl_hash(index, hash);
2391
0
        prev_ctrl
2392
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
2393
2394
    /// Sets a control byte, and possibly also the replicated control byte at
2395
    /// the end of the array.
2396
    ///
2397
    /// This function does not make any changes to the `data` parts of the table,
2398
    /// or any changes to the `items` or `growth_left` field of the table.
2399
    ///
2400
    /// # Safety
2401
    ///
2402
    /// You must observe the following safety rules when calling this function:
2403
    ///
2404
    /// * The [`RawTableInner`] has already been allocated;
2405
    ///
2406
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2407
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2408
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2409
    ///
2410
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2411
    ///
2412
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2413
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2414
    ///
2415
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2416
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2417
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2418
    #[inline]
2419
0
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2420
        // Replicate the first Group::WIDTH control bytes at the end of
2421
        // the array without using a branch. If the table is smaller than
2422
        // the group width (self.buckets() < Group::WIDTH),
2423
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2424
        //
2425
        // - If index >= Group::WIDTH then index == index2.
2426
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2427
        //
2428
        // The very last replicated control byte is never actually read because
2429
        // we mask the initial index for unaligned loads, but we write it
2430
        // anyway because it makes the set_ctrl implementation simpler.
2431
        //
2432
        // If there are fewer buckets than Group::WIDTH then this code will
2433
        // replicate the buckets at the end of the trailing group. For example
2434
        // with 2 buckets and a group size of 4, the control bytes will look
2435
        // like this:
2436
        //
2437
        //     Real    |             Replicated
2438
        // ---------------------------------------------
2439
        // | [A] | [B] | [Tag::EMPTY] | [Tag::EMPTY] | [A] | [B] |
2440
        // ---------------------------------------------
2441
2442
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2443
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2444
0
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2445
2446
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2447
0
        *self.ctrl(index) = ctrl;
2448
0
        *self.ctrl(index2) = ctrl;
2449
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
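
The replication arithmetic above is easiest to see with concrete numbers. The following is a small, self-contained sketch of the same formula, using a hypothetical `GROUP_WIDTH` of 4 in place of hashbrown's real `Group::WIDTH`; the function and the example values are illustrative only.

const GROUP_WIDTH: usize = 4; // hypothetical group width, standing in for Group::WIDTH

// Same formula as in `set_ctrl` above:
// `((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH`
fn mirror_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // 2 buckets (bucket_mask = 1): both real slots are mirrored into the trailing
    // group, matching the diagram in the comment above.
    assert_eq!(mirror_index(0, 1), 4);
    assert_eq!(mirror_index(1, 1), 5);

    // 8 buckets (bucket_mask = 7): indices below GROUP_WIDTH get a distinct mirror
    // at `bucket_mask + 1 + index`; larger indices simply map to themselves.
    assert_eq!(mirror_index(0, 7), 8);
    assert_eq!(mirror_index(5, 7), 5);
}
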
2450
2451
    /// Returns a pointer to a control byte.
2452
    ///
2453
    /// # Safety
2454
    ///
2455
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2456
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2457
    /// In that case, calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2458
    /// will return a pointer to the end of the allocated table and it is useless on its own.
2459
    ///
2460
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2461
    /// table that has not been allocated results in [`Undefined Behavior`].
2462
    ///
2463
    /// So to satisfy both requirements you should always follow the rule that
2464
    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2465
    ///
2466
    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2467
    /// for read-only purposes.
2468
    ///
2469
    /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
2470
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2471
    ///
2472
    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2473
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2474
    #[inline]
2475
0
    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2476
0
        debug_assert!(index < self.num_ctrl_bytes());
2477
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2478
0
        self.ctrl.as_ptr().add(index).cast()
2479
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
2480
2481
    /// Gets the slice of all control bytes.
2482
0
    fn ctrl_slice(&mut self) -> &mut [Tag] {
2483
        // SAFETY: We've initialized all control bytes, and have the correct number.
2484
0
        unsafe { slice::from_raw_parts_mut(self.ctrl.as_ptr().cast(), self.num_ctrl_bytes()) }
2485
0
    }
2486
2487
    #[inline]
2488
0
    fn buckets(&self) -> usize {
2489
0
        self.bucket_mask + 1
2490
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
2491
2492
    /// Checks whether the bucket at `index` is full.
2493
    ///
2494
    /// # Safety
2495
    ///
2496
    /// The caller must ensure `index` is less than the number of buckets.
2497
    #[inline]
2498
0
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2499
0
        debug_assert!(index < self.buckets());
2500
0
        (*self.ctrl(index)).is_full()
2501
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
2502
2503
    #[inline]
2504
0
    fn num_ctrl_bytes(&self) -> usize {
2505
0
        self.bucket_mask + 1 + Group::WIDTH
2506
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::num_ctrl_bytes
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::num_ctrl_bytes
2507
2508
    #[inline]
2509
0
    fn is_empty_singleton(&self) -> bool {
2510
0
        self.bucket_mask == 0
2511
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
2512
2513
    /// Attempts to allocate a new hash table with at least enough capacity
2514
    /// for inserting the given number of elements without reallocating,
2515
    /// and return it inside `ScopeGuard` to protect against panic in the hash
2516
    /// function.
2517
    ///
2518
    /// # Note
2519
    ///
2520
    /// It is recommended (but not required):
2521
    ///
2522
    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2523
    ///
2524
    /// * That the `alloc` is the same [`Allocator`] as the `Allocator` used
2525
    ///   to allocate this table.
2526
    ///
2527
    /// * That the `table_layout` is the same [`TableLayout`] as the `TableLayout` used
2528
    ///   to allocate this table.
2529
    ///
2530
    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2531
    /// this table, then using `mem::swap` with the `self` and the new table returned
2532
    /// by this function results in [`undefined behavior`].
2533
    ///
2534
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2535
    #[allow(clippy::mut_mut)]
2536
    #[inline]
2537
0
    fn prepare_resize<'a, A>(
2538
0
        &self,
2539
0
        alloc: &'a A,
2540
0
        table_layout: TableLayout,
2541
0
        capacity: usize,
2542
0
        fallibility: Fallibility,
2543
0
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2544
0
    where
2545
0
        A: Allocator,
2546
    {
2547
0
        debug_assert!(self.items <= capacity);
2548
2549
        // Allocate and initialize the new table.
2550
0
        let new_table =
2551
0
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2552
2553
        // The hash function may panic, in which case we simply free the new
2554
        // table without dropping any elements that may have been copied into
2555
        // it.
2556
        //
2557
        // This guard is also used to free the old table on success, see
2558
        // the comment at the bottom of this function.
2559
0
        Ok(guard(new_table, move |self_| {
2560
0
            if !self_.is_empty_singleton() {
2561
0
                // SAFETY:
2562
0
                // 1. We have checked that our table is allocated.
2563
0
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2564
0
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2565
0
                unsafe { self_.free_buckets(alloc, table_layout) };
2566
0
            }
2567
0
        }))
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<allocator_api2::stable::alloc::global::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<allocator_api2::stable::alloc::global::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>::{closure#0}
2568
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>
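
The `ScopeGuard` returned here is what keeps a panicking hash function from leaking the freshly allocated table during a resize. A minimal freestanding sketch of that pattern, assuming a hand-rolled `Guard` type and a plain `Vec<u8>` as a stand-in for the new allocation (neither is hashbrown's actual scopeguard API):

struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    on_drop: F,
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        // Runs on every exit path, including unwinding from a panic.
        (self.on_drop)(&mut self.value);
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _new_table = Guard {
            value: vec![0u8; 64], // stand-in for the newly allocated, still-empty table
            on_drop: |buf: &mut Vec<u8>| {
                // In `prepare_resize` this is where `free_buckets` would run.
                println!("freeing {} bytes of the unfinished table", buf.len());
            },
        };
        panic!("the hash function panicked while elements were being moved");
    });
    // The panic propagated, but the guard still ran and released the allocation.
    assert!(result.is_err());
}
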
2569
2570
    /// Reserves or rehashes to make room for `additional` more elements.
2571
    ///
2572
    /// This uses dynamic dispatch to reduce the amount of
2573
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2574
    ///
2575
    /// # Safety
2576
    ///
2577
    /// If any of the following conditions are violated, the result is
2578
    /// [`undefined behavior`]:
2579
    ///
2580
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2581
    ///   to allocate this table.
2582
    ///
2583
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2584
    ///   used to allocate this table.
2585
    ///
2586
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2587
    ///   the elements stored in the table.
2588
    ///
2589
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2590
    ///
2591
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2592
    #[allow(clippy::inline_always)]
2593
    #[inline(always)]
2594
0
    unsafe fn reserve_rehash_inner<A>(
2595
0
        &mut self,
2596
0
        alloc: &A,
2597
0
        additional: usize,
2598
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2599
0
        fallibility: Fallibility,
2600
0
        layout: TableLayout,
2601
0
        drop: Option<unsafe fn(*mut u8)>,
2602
0
    ) -> Result<(), TryReserveError>
2603
0
    where
2604
0
        A: Allocator,
2605
    {
2606
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2607
0
        let new_items = match self.items.checked_add(additional) {
2608
0
            Some(new_items) => new_items,
2609
0
            None => return Err(fallibility.capacity_overflow()),
2610
        };
2611
0
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2612
0
        if new_items <= full_capacity / 2 {
2613
            // Rehash in-place without re-allocating if we have plenty of spare
2614
            // capacity that is locked up due to DELETED entries.
2615
2616
            // SAFETY:
2617
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2618
            //    (since new_items <= full_capacity / 2);
2619
            // 2. The caller ensures that `drop` function is the actual drop function of
2620
            //    the elements stored in the table.
2621
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2622
            //    used to allocate this table.
2623
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2624
            //    are already initialized.
2625
0
            self.rehash_in_place(hasher, layout.size, drop);
2626
0
            Ok(())
2627
        } else {
2628
            // Otherwise, conservatively resize to at least the next size up
2629
            // to avoid churning deletes into frequent rehashes.
2630
            //
2631
            // SAFETY:
2632
            // 1. We know for sure that `capacity >= self.items`.
2633
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2634
            //    [`TableLayout`] that were used to allocate this table.
2635
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2636
            //    are already initialized.
2637
0
            self.resize_inner(
2638
0
                alloc,
2639
0
                usize::max(new_items, full_capacity + 1),
2640
0
                hasher,
2641
0
                fallibility,
2642
0
                layout,
2643
            )
2644
        }
2645
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<_>
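
The branch above amounts to a simple growth policy: if the requested insertions would still leave at least half of the nominal capacity free, the space locked up in DELETED slots is reclaimed in place; otherwise the table grows to at least the next size up. A freestanding sketch of that decision (the numbers are hypothetical; 112 happens to be the usable capacity of a 128-bucket hashbrown table, but `plan_growth` itself is illustrative only):

#[derive(Debug, PartialEq)]
enum GrowthAction {
    RehashInPlace,
    ResizeTo(usize),
}

fn plan_growth(items: usize, additional: usize, full_capacity: usize) -> GrowthAction {
    let new_items = items.checked_add(additional).expect("capacity overflow");
    if new_items <= full_capacity / 2 {
        // Plenty of spare capacity is locked up in DELETED entries: reclaim it in place.
        GrowthAction::RehashInPlace
    } else {
        // Conservatively jump to at least the next size up to avoid churning
        // deletions into frequent rehashes.
        GrowthAction::ResizeTo(usize::max(new_items, full_capacity + 1))
    }
}

fn main() {
    // 40 live items, 10 more requested, capacity 112: 50 <= 56, so rehash in place.
    assert_eq!(plan_growth(40, 10, 112), GrowthAction::RehashInPlace);

    // 100 live items, 10 more requested: 110 > 56, so grow to at least 113.
    assert_eq!(plan_growth(100, 10, 112), GrowthAction::ResizeTo(113));
}
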
2646
2647
    /// Returns an iterator over full buckets indices in the table.
2648
    ///
2649
    /// # Safety
2650
    ///
2651
    /// Behavior is undefined if any of the following conditions are violated:
2652
    ///
2653
    /// * The caller has to ensure that the `RawTableInner` outlives the
2654
    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2655
    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2656
    ///   `full_buckets_indices` method unsafe.
2657
    ///
2658
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2659
    #[inline(always)]
2660
0
    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2661
        // SAFETY:
2662
        // 1. Since the caller of this function ensures that the control bytes
2663
        //    are properly initialized and `self.ctrl(0)` points to the start
2664
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2665
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2666
        //    control bytes.
2667
        // 2. The value of `items` is equal to the amount of data (values) added
2668
        //    to the table.
2669
        //
2670
        //                         `ctrl` points here (to the start
2671
        //                         of the first control byte `CT0`)
2672
        //                          ∨
2673
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2674
        //                           \________  ________/
2675
        //                                    \/
2676
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2677
        //
2678
        // where: T0...T_n  - our stored data;
2679
        //        CT0...CT_n - control bytes or metadata for `data`.
2680
0
        let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::<u8>());
2681
2682
0
        FullBucketsIndices {
2683
0
            // Load the first group
2684
0
            // SAFETY: See explanation above.
2685
0
            current_group: Group::load_aligned(ctrl.as_ptr().cast())
2686
0
                .match_full()
2687
0
                .into_iter(),
2688
0
            group_first_index: 0,
2689
0
            ctrl,
2690
0
            items: self.items,
2691
0
        }
2692
0
    }
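
A simplified, self-contained model of the scan that `full_buckets_indices` sets up: load a group of control bytes, turn "is this slot full?" into a bitmask, and pop indices off that mask. Here a group is a plain byte slice and the mask a `u32`; the real `Group` and `BitMaskIter` are SIMD-backed, but full control bytes are likewise the ones with the top bit clear.

const EMPTY: u8 = 0xFF;   // corresponds to Tag::EMPTY
const DELETED: u8 = 0x80; // corresponds to Tag::DELETED

fn match_full(group: &[u8]) -> u32 {
    let mut mask = 0u32;
    for (i, &ctrl) in group.iter().enumerate() {
        // Full control bytes store a 7-bit tag and have their top bit clear.
        if ctrl & 0x80 == 0 {
            mask |= 1 << i;
        }
    }
    mask
}

fn main() {
    // One group of 8 control bytes with full slots at indices 1, 4 and 6.
    let group = [EMPTY, 0x23, DELETED, EMPTY, 0x5A, EMPTY, 0x11, EMPTY];
    let mut mask = match_full(&group);

    let mut full_indices = Vec::new();
    while mask != 0 {
        let index = mask.trailing_zeros() as usize; // lowest set bit = next full bucket
        full_indices.push(index);
        mask &= mask - 1; // clear the lowest set bit and keep scanning
    }
    assert_eq!(full_indices, [1, 4, 6]);
}
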
2693
2694
    /// Allocates a new table of a different size and moves the contents of the
2695
    /// current table into it.
2696
    ///
2697
    /// This uses dynamic dispatch to reduce the amount of
2698
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2699
    ///
2700
    /// # Safety
2701
    ///
2702
    /// If any of the following conditions are violated, the result is
2703
    /// [`undefined behavior`]:
2704
    ///
2705
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2706
    ///   to allocate this table;
2707
    ///
2708
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2709
    ///   used to allocate this table;
2710
    ///
2711
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2712
    ///
2713
    /// The caller of this function must ensure that `capacity >= self.items`
2714
    /// otherwise:
2715
    ///
2716
    /// * If `self.items != 0`, calling this function with `capacity == 0`
2717
    ///   results in [`undefined behavior`].
2718
    ///
2719
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2720
    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2721
    ///   results in [`undefined behavior`].
2722
    ///
2723
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2724
    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2725
    ///   will never return (it will go into an infinite loop).
2726
    ///
2727
    /// Note: It is recommended (but not required) that the new table's `capacity`
2728
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2729
    /// this function can never return. See [`RawTableInner::find_insert_slot`] for
2730
    /// more information.
2731
    ///
2732
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2733
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2734
    #[allow(clippy::inline_always)]
2735
    #[inline(always)]
2736
0
    unsafe fn resize_inner<A>(
2737
0
        &mut self,
2738
0
        alloc: &A,
2739
0
        capacity: usize,
2740
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2741
0
        fallibility: Fallibility,
2742
0
        layout: TableLayout,
2743
0
    ) -> Result<(), TryReserveError>
2744
0
    where
2745
0
        A: Allocator,
2746
    {
2747
        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2748
        // that were used to allocate this table.
2749
0
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2750
2751
        // SAFETY: We know for sure that RawTableInner will outlive the
2752
        // returned `FullBucketsIndices` iterator, and the caller of this
2753
        // function ensures that the control bytes are properly initialized.
2754
0
        for full_byte_index in self.full_buckets_indices() {
2755
0
            // This may panic.
2756
0
            let hash = hasher(self, full_byte_index);
2757
0
2758
0
            // SAFETY:
2759
0
            // We can use a simpler version of insert() here since:
2760
0
            // 1. There are no DELETED entries.
2761
0
            // 2. We know there is enough space in the table.
2762
0
            // 3. All elements are unique.
2763
0
            // 4. The caller of this function guarantees that `capacity > 0`
2764
0
            //    so `new_table` must already have some allocated memory.
2765
0
            // 5. We set `growth_left` and `items` fields of the new table
2766
0
            //    after the loop.
2767
0
            // 6. We insert into the table, at the returned index, the data
2768
0
            //    matching the given hash immediately after calling this function.
2769
0
            let (new_index, _) = new_table.prepare_insert_slot(hash);
2770
0
2771
0
            // SAFETY:
2772
0
            //
2773
0
            // * `src` is valid for reads of `layout.size` bytes, since the
2774
0
            //   table is alive and the `full_byte_index` is guaranteed to be
2775
0
            //   within bounds (see `FullBucketsIndices::next_impl`);
2776
0
            //
2777
0
            // * `dst` is valid for writes of `layout.size` bytes, since the
2778
0
            //   caller ensures that `table_layout` matches the [`TableLayout`]
2779
0
            //   that was used to allocate old table and we have the `new_index`
2780
0
            //   returned by `prepare_insert_slot`.
2781
0
            //
2782
0
            // * Both `src` and `dst` are properly aligned.
2783
0
            //
2784
0
            // * Both `src` and `dst` point to different regions of memory.
2785
0
            ptr::copy_nonoverlapping(
2786
0
                self.bucket_ptr(full_byte_index, layout.size),
2787
0
                new_table.bucket_ptr(new_index, layout.size),
2788
0
                layout.size,
2789
0
            );
2790
0
        }
2791
2792
        // The hash function didn't panic, so we can safely set the
2793
        // `growth_left` and `items` fields of the new table.
2794
0
        new_table.growth_left -= self.items;
2795
0
        new_table.items = self.items;
2796
2797
        // We successfully copied all elements without panicking. Now replace
2798
        // self with the new table. The old table will have its memory freed but
2799
        // the items will not be dropped (since they have been moved into the
2800
        // new table).
2801
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2802
        // that was used to allocate this table.
2803
0
        mem::swap(self, &mut new_table);
2804
2805
0
        Ok(())
2806
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<_>
2807
2808
    /// Rehashes the contents of the table in place (i.e. without changing the
2809
    /// allocation).
2810
    ///
2811
    /// If `hasher` panics then some of the table's contents may be lost.
2812
    ///
2813
    /// This uses dynamic dispatch to reduce the amount of
2814
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2815
    ///
2816
    /// # Safety
2817
    ///
2818
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2819
    ///
2820
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2821
    ///
2822
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2823
    ///   the elements stored in the table.
2824
    ///
2825
    /// * The [`RawTableInner`] has already been allocated;
2826
    ///
2827
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2828
    ///
2829
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2830
    #[allow(clippy::inline_always)]
2831
    #[cfg_attr(feature = "inline-more", inline(always))]
2832
    #[cfg_attr(not(feature = "inline-more"), inline)]
2833
0
    unsafe fn rehash_in_place(
2834
0
        &mut self,
2835
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2836
0
        size_of: usize,
2837
0
        drop: Option<unsafe fn(*mut u8)>,
2838
0
    ) {
2839
        // If the hash function panics then properly clean up any elements
2840
        // that we haven't rehashed yet. We unfortunately can't preserve the
2841
        // elements since we lost their hashes and have no way of recovering them
2842
        // without risking another panic.
2843
0
        self.prepare_rehash_in_place();
2844
2845
0
        let mut guard = guard(self, move |self_| {
2846
0
            if let Some(drop) = drop {
2847
0
                for i in 0..self_.buckets() {
2848
0
                    if *self_.ctrl(i) == Tag::DELETED {
2849
0
                        self_.set_ctrl(i, Tag::EMPTY);
2850
0
                        drop(self_.bucket_ptr(i, size_of));
2851
0
                        self_.items -= 1;
2852
0
                    }
2853
                }
2854
0
            }
2855
0
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2856
0
        });
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
2857
2858
        // At this point, DELETED elements are elements that we haven't
2859
        // rehashed yet. Find them and re-insert them at their ideal
2860
        // position.
2861
0
        'outer: for i in 0..guard.buckets() {
2862
0
            if *guard.ctrl(i) != Tag::DELETED {
2863
0
                continue;
2864
0
            }
2865
2866
0
            let i_p = guard.bucket_ptr(i, size_of);
2867
2868
            'inner: loop {
2869
                // Hash the current item
2870
0
                let hash = hasher(*guard, i);
2871
2872
                // Search for a suitable place to put it
2873
                //
2874
                // SAFETY: Caller of this function ensures that the control bytes
2875
                // are properly initialized.
2876
0
                let new_i = guard.find_insert_slot(hash).index;
2877
2878
                // Probing works by scanning through all of the control
2879
                // bytes in groups, which may not be aligned to the group
2880
                // size. If both the new and old position fall within the
2881
                // same unaligned group, then there is no benefit in moving
2882
                // it and we can just continue to the next item.
2883
0
                if likely(guard.is_in_same_group(i, new_i, hash)) {
2884
0
                    guard.set_ctrl_hash(i, hash);
2885
0
                    continue 'outer;
2886
0
                }
2887
2888
0
                let new_i_p = guard.bucket_ptr(new_i, size_of);
2889
2890
                // We are moving the current item to a new position. Write
2891
                // our H2 to the control byte of the new position.
2892
0
                let prev_ctrl = guard.replace_ctrl_hash(new_i, hash);
2893
0
                if prev_ctrl == Tag::EMPTY {
2894
0
                    guard.set_ctrl(i, Tag::EMPTY);
2895
                    // If the target slot is empty, simply move the current
2896
                    // element into the new slot and clear the old control
2897
                    // byte.
2898
0
                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2899
0
                    continue 'outer;
2900
                } else {
2901
                    // If the target slot is occupied, swap the two elements
2902
                    // and then continue processing the element that we just
2903
                    // swapped into the old slot.
2904
0
                    debug_assert_eq!(prev_ctrl, Tag::DELETED);
2905
0
                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2906
0
                    continue 'inner;
2907
                }
2908
            }
2909
        }
2910
2911
0
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2912
2913
0
        mem::forget(guard);
2914
0
    }
2915
2916
    /// Deallocates the table without dropping any entries.
2917
    ///
2918
    /// # Note
2919
    ///
2920
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
2921
    /// otherwise it can lead to a memory leak. Calling this function also automatically
2922
    /// invalidates (makes dangling) all instances of buckets ([`Bucket`]) and invalidates
2923
    /// (makes dangling) the `ctrl` field of the table.
2924
    ///
2925
    /// # Safety
2926
    ///
2927
    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2928
    ///
2929
    /// * The [`RawTableInner`] has already been allocated;
2930
    ///
2931
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2932
    ///   to allocate this table.
2933
    ///
2934
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2935
    ///   to allocate this table.
2936
    ///
2937
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2938
    ///
2939
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2940
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2941
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2942
    #[inline]
2943
0
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2944
0
    where
2945
0
        A: Allocator,
2946
    {
2947
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2948
        // method.
2949
0
        let (ptr, layout) = self.allocation_info(table_layout);
2950
0
        alloc.deallocate(ptr, layout);
2951
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<allocator_api2::stable::alloc::global::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<_>
2952
2953
    /// Returns a pointer to the allocated memory and the layout that was used to
2954
    /// allocate the table.
2955
    ///
2956
    /// # Safety
2957
    ///
2958
    /// Caller of this function must observe the following safety rules:
2959
    ///
2960
    /// * The [`RawTableInner`] has already been allocated, otherwise
2961
    ///   calling this function results in [`undefined behavior`]
2962
    ///
2963
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2964
    ///   that was used to allocate this table. Failure to comply with this condition
2965
    ///   may result in [`undefined behavior`].
2966
    ///
2967
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2968
    ///
2969
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2970
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2971
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2972
    #[inline]
2973
0
    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
2974
0
        debug_assert!(
2975
0
            !self.is_empty_singleton(),
2976
0
            "this function can only be called on non-empty tables"
2977
        );
2978
2979
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
2980
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
2981
0
            Some(lco) => lco,
2982
0
            None => unsafe { hint::unreachable_unchecked() },
2983
        };
2984
0
        (
2985
0
            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
2986
0
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
2987
0
            layout,
2988
0
        )
2989
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
2990
2991
    /// Returns the total amount of memory allocated internally by the hash
2992
    /// table, in bytes.
2993
    ///
2994
    /// The returned number is informational only. It is intended to be
2995
    /// primarily used for memory profiling.
2996
    ///
2997
    /// # Safety
2998
    ///
2999
    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3000
    /// that was used to allocate this table. Failure to comply with this condition
3001
    /// may result in [`undefined behavior`].
3002
    ///
3003
    ///
3004
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3005
    #[inline]
3006
0
    unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize {
3007
0
        if self.is_empty_singleton() {
3008
0
            0
3009
        } else {
3010
            // SAFETY:
3011
            // 1. We have checked that our table is allocated.
3012
            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3013
            // that was used to allocate this table.
3014
0
            unsafe { self.allocation_info(table_layout).1.size() }
3015
        }
3016
0
    }
3017
3018
    /// Marks all table buckets as empty without dropping their contents.
3019
    #[inline]
3020
0
    fn clear_no_drop(&mut self) {
3021
0
        if !self.is_empty_singleton() {
3022
0
            self.ctrl_slice().fill_empty();
3023
0
        }
3024
0
        self.items = 0;
3025
0
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3026
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::clear_no_drop
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::clear_no_drop
3027
3028
    /// Erases the [`Bucket`]'s control byte at the given index so that it does not
3029
    /// get flagged as full, decreases the `items` count of the table and, if possible,
3030
    /// increases `self.growth_left`.
3031
    ///
3032
    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3033
    /// does not make any changes to the `data` parts of the table. The caller of this
3034
    /// function must take care to properly drop the `data`, otherwise calling this
3035
    /// function may result in a memory leak.
3036
    ///
3037
    /// # Safety
3038
    ///
3039
    /// You must observe the following safety rules when calling this function:
3040
    ///
3041
    /// * The [`RawTableInner`] has already been allocated;
3042
    ///
3043
    /// * The control byte at the given position must be a full control byte;
3044
    ///
3045
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3046
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3047
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3048
    ///
3049
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3050
    ///
3051
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3052
    /// functions is likely to result in [`undefined behavior`] due to a subtraction overflow
3053
    /// (`self.items -= 1` causes an overflow when `self.items == 0`).
3054
    ///
3055
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
3056
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
3057
    ///
3058
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3059
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3060
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3061
    #[inline]
3062
0
    unsafe fn erase(&mut self, index: usize) {
3063
0
        debug_assert!(self.is_bucket_full(index));
3064
3065
        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3066
        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3067
0
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3068
        // SAFETY:
3069
        // - The caller must uphold the safety contract for `erase` method;
3070
        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3071
0
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3072
0
        let empty_after = Group::load(self.ctrl(index)).match_empty();
3073
3074
        // Inserting and searching in the map is performed by two key functions:
3075
        //
3076
        // - The `find_insert_slot` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED`
3077
        //   slot in a group to be able to insert. If it doesn't find an `Tag::EMPTY` or `Tag::DELETED`
3078
        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3079
        //   and so on until it has gone through all the groups in the control bytes.
3080
        //
3081
        // - The `find_inner` function that looks for the index of the desired element by looking
3082
        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3083
        //   there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_slot`
3084
        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3085
        //   jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY`
3086
        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
3087
        //   finds the desired element or hits an `Tag::EMPTY` slot/byte.
3088
        //
3089
        // Accordingly, this leads to two consequences:
3090
        //
3091
        // - The map must have `Tag::EMPTY` slots (bytes);
3092
        //
3093
        // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner`
3094
        //   function may stumble upon an `Tag::EMPTY` byte before finding the desired element and stop
3095
        //   searching.
3096
        //
3097
        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3098
        // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes
3099
        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3100
        // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3101
        // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3102
        // upon an `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well.
3103
        //
3104
        // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
3105
        // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
3106
        // cannot have `Tag::DELETED` bytes.
3107
        //
3108
        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3109
        // `trailing_zeros` refers to the bytes at the beginning of a group.
3110
0
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3111
0
            Tag::DELETED
3112
        } else {
3113
0
            self.growth_left += 1;
3114
0
            Tag::EMPTY
3115
        };
3116
        // SAFETY: the caller must uphold the safety contract for `erase` method.
3117
0
        self.set_ctrl(index, ctrl);
3118
0
        self.items -= 1;
3119
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::erase
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::erase
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::erase
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::erase
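
The long comment above reduces to a local rule: if the contiguous run of non-EMPTY control bytes running through the erased slot is at least one group wide, the slot must become DELETED so that probe sequences keep walking past it; otherwise it can safely return to EMPTY. Below is a deliberately simplified, non-wrapping model of that rule with a hypothetical group width of 4; the real code uses wrapping group loads and bitmask leading/trailing-zero counts rather than this linear scan.

const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0x80;
const GROUP_WIDTH: usize = 4; // hypothetical group width for the example

fn ctrl_after_erase(ctrl: &[u8], index: usize) -> u8 {
    // Contiguous non-EMPTY bytes ending just before `index`, and starting at `index`,
    // each capped at one group width (a group load can never see further than that).
    let before = ctrl[..index].iter().rev().take_while(|&&c| c != EMPTY).count();
    let after = ctrl[index..].iter().take_while(|&&c| c != EMPTY).count();
    if before.min(GROUP_WIDTH) + after.min(GROUP_WIDTH) >= GROUP_WIDTH {
        DELETED // a probe could have walked straight across this slot
    } else {
        EMPTY // a nearby EMPTY byte stops probes anyway, so the slot can be reused freely
    }
}

fn main() {
    // The erased slot (index 2) sits inside a long run of full bytes: it must stay
    // visible to probing as DELETED.
    let crowded = [0x11, 0x22, 0x33, 0x44, 0x55, EMPTY];
    assert_eq!(ctrl_after_erase(&crowded, 2), DELETED);

    // EMPTY bytes sit close by on both sides, so the slot can become EMPTY again.
    let sparse = [EMPTY, 0x22, 0x33, EMPTY, 0x55, EMPTY];
    assert_eq!(ctrl_after_erase(&sparse, 2), EMPTY);
}
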
3120
}
3121
3122
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3123
0
    fn clone(&self) -> Self {
3124
0
        if self.table.is_empty_singleton() {
3125
0
            Self::new_in(self.alloc.clone())
3126
        } else {
3127
            unsafe {
3128
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3129
                //
3130
                // SAFETY: This is safe as we are taking the size of an already allocated table
3131
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
3132
                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3133
0
                let mut new_table = match Self::new_uninitialized(
3134
0
                    self.alloc.clone(),
3135
0
                    self.table.buckets(),
3136
0
                    Fallibility::Infallible,
3137
0
                ) {
3138
0
                    Ok(table) => table,
3139
0
                    Err(_) => hint::unreachable_unchecked(),
3140
                };
3141
3142
                // Cloning elements may fail (the clone function may panic). But we don't
3143
                // need to worry about uninitialized control bits, since:
3144
                // 1. The number of items (elements) in the table is zero, which means that
3145
                //    the control bits will not be read by the `Drop` function.
3146
                // 2. The `clone_from_spec` method will first copy all control bits from
3147
                //    `self` (thus initializing them). But this will not affect the `Drop`
3148
                //    function, since the `clone_from_spec` function sets `items` only after
3149
                //    successfully cloning all elements.
3150
0
                new_table.clone_from_spec(self);
3151
0
                new_table
3152
            }
3153
        }
3154
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize> as core::clone::Clone>::clone
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as core::clone::Clone>::clone
3155
3156
0
    fn clone_from(&mut self, source: &Self) {
3157
0
        if source.table.is_empty_singleton() {
3158
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3159
0
            unsafe {
3160
0
                // SAFETY:
3161
0
                // 1. We call the function only once;
3162
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3163
0
                //    and [`TableLayout`] that were used to allocate this table.
3164
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
3165
0
                //    because we have replaced the inner table with a new one.
3166
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3167
0
            }
3168
        } else {
3169
            unsafe {
3170
                // Make sure that if any panic occurs, we clear the table and
3171
                // leave it in an empty state.
3172
0
                let mut self_ = guard(self, |self_| {
3173
0
                    self_.clear_no_drop();
3174
0
                });
3175
3176
                // First, drop all our elements without clearing the control
3177
                // bytes. If this panics then the scope guard will clear the
3178
                // table, leaking any elements that were not dropped yet.
3179
                //
3180
                // This leak is unavoidable: we can't try dropping more elements
3181
                // since this could lead to another panic and abort the process.
3182
                //
3183
                // SAFETY: If something goes wrong we clear our table right after
3184
                // dropping the elements, so there is no double drop, since `items`
3185
                // will be equal to zero.
3186
0
                self_.table.drop_elements::<T>();
3187
3188
                // If necessary, resize our table to match the source.
3189
0
                if self_.buckets() != source.buckets() {
3190
0
                    let new_inner = match RawTableInner::new_uninitialized(
3191
0
                        &self_.alloc,
3192
0
                        Self::TABLE_LAYOUT,
3193
0
                        source.buckets(),
3194
0
                        Fallibility::Infallible,
3195
0
                    ) {
3196
0
                        Ok(table) => table,
3197
0
                        Err(_) => hint::unreachable_unchecked(),
3198
                    };
3199
                    // Replace the old inner with a new uninitialized one. It's ok, since if something goes
3200
                    // wrong, the `ScopeGuard` will initialize all control bytes and leave an empty table.
3201
0
                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3202
0
                    if !old_inner.is_empty_singleton() {
3203
0
                        // SAFETY:
3204
0
                        // 1. We have checked that our table is allocated.
3205
0
                        // 2. We know for sure that `alloc` and `table_layout` matches
3206
0
                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3207
0
                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3208
0
                    }
3209
0
                }
3210
3211
                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3212
                // inside the `clone_from_impl` function will take care of that, dropping all
3213
                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3214
0
                self_.clone_from_spec(source);
3215
3216
                // Disarm the scope guard if cloning was successful.
3217
0
                ScopeGuard::into_inner(self_);
3218
            }
3219
        }
3220
0
    }
3221
}
3222
3223
/// Specialization of `clone_from` for `Copy` types
3224
trait RawTableClone {
3225
    unsafe fn clone_from_spec(&mut self, source: &Self);
3226
}
3227
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3228
    default_fn! {
3229
        #[cfg_attr(feature = "inline-more", inline)]
3230
0
        unsafe fn clone_from_spec(&mut self, source: &Self) {
3231
0
            self.clone_from_impl(source);
3232
0
        }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize> as hashbrown::raw::RawTableClone>::clone_from_spec
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as hashbrown::raw::RawTableClone>::clone_from_spec
3233
    }
3234
}
3235
#[cfg(feature = "nightly")]
3236
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3237
    #[cfg_attr(feature = "inline-more", inline)]
3238
    unsafe fn clone_from_spec(&mut self, source: &Self) {
3239
        source
3240
            .table
3241
            .ctrl(0)
3242
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3243
        source
3244
            .data_start()
3245
            .as_ptr()
3246
            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3247
3248
        self.table.items = source.table.items;
3249
        self.table.growth_left = source.table.growth_left;
3250
    }
3251
}
3252
3253
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3254
    /// Common code for `clone` and `clone_from`. Assumes:
3255
    /// - `self.buckets() == source.buckets()`.
3256
    /// - Any existing elements have been dropped.
3257
    /// - The control bytes are not initialized yet.
3258
    #[cfg_attr(feature = "inline-more", inline)]
3259
0
    unsafe fn clone_from_impl(&mut self, source: &Self) {
3260
        // Copy the control bytes unchanged. We do this in a single pass
3261
0
        source
3262
0
            .table
3263
0
            .ctrl(0)
3264
0
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3265
3266
        // The cloning of elements may panic, in which case we need
3267
        // to make sure we drop only the elements that have been
3268
        // cloned so far.
3269
0
        let mut guard = guard((0, &mut *self), |(index, self_)| {
3270
0
            if T::NEEDS_DROP {
3271
0
                for i in 0..*index {
3272
0
                    if self_.is_bucket_full(i) {
3273
0
                        self_.bucket(i).drop();
3274
0
                    }
3275
                }
3276
0
            }
3277
0
        });
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::clone_from_impl::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::clone_from_impl::{closure#0}
3278
3279
0
        for from in source.iter() {
3280
0
            let index = source.bucket_index(&from);
3281
0
            let to = guard.1.bucket(index);
3282
0
            to.write(from.as_ref().clone());
3283
0
3284
0
            // Update the index in case we need to unwind.
3285
0
            guard.0 = index + 1;
3286
0
        }
3287
3288
        // Successfully cloned all items, no need to clean up.
3289
0
        mem::forget(guard);
3290
3291
0
        self.table.items = source.table.items;
3292
0
        self.table.growth_left = source.table.growth_left;
3293
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::clone_from_impl
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::clone_from_impl
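
The guard-plus-cursor idea in `clone_from_impl` can be shown in miniature outside the table: track how many elements have been cloned so far, let a drop guard clean up exactly that prefix if a `clone` call panics, and `mem::forget` the guard once everything succeeded. `Explosive` and `CloneGuard` below are purely illustrative types, not hashbrown APIs.

use std::mem;

struct Explosive(u32);

impl Clone for Explosive {
    fn clone(&self) -> Self {
        if self.0 == 3 {
            panic!("clone failed");
        }
        Explosive(self.0)
    }
}

struct CloneGuard {
    cloned_so_far: usize,
}

impl Drop for CloneGuard {
    fn drop(&mut self) {
        // In `clone_from_impl` this is where buckets `0..index` are walked and the
        // full ones dropped; here we only report how many would need cleaning up.
        println!("unwinding: {} partially cloned elements to drop", self.cloned_so_far);
    }
}

fn clone_all(source: &[Explosive]) -> Vec<Explosive> {
    let mut guard = CloneGuard { cloned_so_far: 0 };
    let mut out = Vec::with_capacity(source.len());
    for item in source {
        out.push(item.clone()); // may panic partway through
        guard.cloned_so_far += 1;
    }
    mem::forget(guard); // success: disarm the guard, as `clone_from_impl` does
    out
}

fn main() {
    let good = [Explosive(0), Explosive(1), Explosive(2)];
    assert_eq!(clone_all(&good).len(), 3);

    // Cloning the second element panics; the guard still runs during unwinding.
    let bad = [Explosive(1), Explosive(3)];
    assert!(std::panic::catch_unwind(|| clone_all(&bad)).is_err());
}
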
3294
}
3295
3296
impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3297
    #[inline]
3298
0
    fn default() -> Self {
3299
0
        Self::new_in(Default::default())
3300
0
    }
3301
}
3302
3303
#[cfg(feature = "nightly")]
3304
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3305
    #[cfg_attr(feature = "inline-more", inline)]
3306
    fn drop(&mut self) {
3307
        unsafe {
3308
            // SAFETY:
3309
            // 1. We call the function only once;
3310
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3311
            //    and [`TableLayout`] that were used to allocate this table.
3312
            // 3. If the drop function of any element fails, then only a memory leak will occur,
3313
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3314
            //    so there won't be any table left in an inconsistent state.
3315
            self.table
3316
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3317
        }
3318
    }
3319
}
3320
#[cfg(not(feature = "nightly"))]
3321
impl<T, A: Allocator> Drop for RawTable<T, A> {
3322
    #[cfg_attr(feature = "inline-more", inline)]
3323
0
    fn drop(&mut self) {
3324
0
        unsafe {
3325
0
            // SAFETY:
3326
0
            // 1. We call the function only once;
3327
0
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3328
0
            //    and [`TableLayout`] that were used to allocate this table.
3329
0
            // 3. If the drop function of any element fails, then only a memory leak will occur,
3330
0
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3331
0
            //    so there won't be any table left in an inconsistent state.
3332
0
            self.table
3333
0
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3334
0
        }
3335
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<usize> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<usize> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as core::ops::drop::Drop>::drop
3336
}
3337
3338
impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3339
    type Item = T;
3340
    type IntoIter = RawIntoIter<T, A>;
3341
3342
    #[cfg_attr(feature = "inline-more", inline)]
3343
0
    fn into_iter(self) -> RawIntoIter<T, A> {
3344
        unsafe {
3345
0
            let iter = self.iter();
3346
0
            self.into_iter_from(iter)
3347
        }
3348
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::iter::traits::collect::IntoIterator>::into_iter
Unexecuted instantiation: <hashbrown::raw::RawTable<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::iter::traits::collect::IntoIterator>::into_iter
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as core::iter::traits::collect::IntoIterator>::into_iter
3349
}
3350
3351
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3352
/// not track an item count.
3353
pub(crate) struct RawIterRange<T> {
3354
    // Mask of full buckets in the current group. Bits are cleared from this
3355
    // mask as each element is processed.
3356
    current_group: BitMaskIter,
3357
3358
    // Pointer to the buckets for the current group.
3359
    data: Bucket<T>,
3360
3361
    // Pointer to the next group of control bytes,
3362
    // Must be aligned to the group size.
3363
    next_ctrl: *const u8,
3364
3365
    // Pointer one past the last control byte of this range.
3366
    end: *const u8,
3367
}
3368
3369
impl<T> RawIterRange<T> {
3370
    /// Returns a `RawIterRange` covering a subset of a table.
3371
    ///
3372
    /// # Safety
3373
    ///
3374
    /// If any of the following conditions are violated, the result is
3375
    /// [`undefined behavior`]:
3376
    ///
3377
    /// * `ctrl` must be [valid] for reads, i.e. the table outlives the `RawIterRange`;
3378
    ///
3379
    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
3380
    ///
3381
    /// * `ctrl` must point to the array of properly initialized control bytes;
3382
    ///
3383
    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3384
    ///
3385
    /// * the value of `len` must be less than or equal to the number of table buckets,
3386
    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3387
    ///   must be positive.
3388
    ///
3389
    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3390
    ///   byte past the end of the same [allocated table].
3391
    ///
3392
    /// * The `len` must be a power of two.
3393
    ///
3394
    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3395
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3396
    #[cfg_attr(feature = "inline-more", inline)]
3397
0
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3398
0
        debug_assert_ne!(len, 0);
3399
0
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3400
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3401
0
        let end = ctrl.add(len);
3402
3403
        // Load the first group and advance ctrl to point to the next group
3404
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3405
0
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3406
0
        let next_ctrl = ctrl.add(Group::WIDTH);
3407
3408
0
        Self {
3409
0
            current_group: current_group.into_iter(),
3410
0
            data,
3411
0
            next_ctrl,
3412
0
            end,
3413
0
        }
3414
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::new
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::new
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::new
Unexecuted instantiation: <hashbrown::raw::RawIterRange<usize>>::new
Unexecuted instantiation: <hashbrown::raw::RawIterRange<_>>::new
3415
3416
    /// Splits a `RawIterRange` into two halves.
3417
    ///
3418
    /// Returns `None` if the remaining range is smaller than or equal to the
3419
    /// group width.
3420
    #[cfg_attr(feature = "inline-more", inline)]
3421
    #[cfg(feature = "rayon")]
3422
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3423
        unsafe {
3424
            if self.end <= self.next_ctrl {
3425
                // Nothing to split if the group that we are currently processing
3426
                // is the last one.
3427
                (self, None)
3428
            } else {
3429
                // len is the remaining number of elements after the group that
3430
                // we are currently processing. It must be a multiple of the
3431
                // group size (small tables are caught by the check above).
3432
                let len = offset_from(self.end, self.next_ctrl);
3433
                debug_assert_eq!(len % Group::WIDTH, 0);
3434
3435
                // Split the remaining elements into two halves, but round the
3436
                // midpoint down in case there is an odd number of groups
3437
                // remaining. This ensures that:
3438
                // - The tail is at least 1 group long.
3439
                // - The split is roughly even considering we still have the
3440
                //   current group to process.
3441
                let mid = (len / 2) & !(Group::WIDTH - 1);
3442
3443
                let tail = Self::new(
3444
                    self.next_ctrl.add(mid),
3445
                    self.data.next_n(Group::WIDTH).next_n(mid),
3446
                    len - mid,
3447
                );
3448
                debug_assert_eq!(
3449
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3450
                    tail.data.ptr
3451
                );
3452
                debug_assert_eq!(self.end, tail.end);
3453
                self.end = self.next_ctrl.add(mid);
3454
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3455
                (self, Some(tail))
3456
            }
3457
        }
3458
    }
3459
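
The midpoint arithmetic above rounds down to a multiple of the group width so that the tail half is never shorter than one group. A standalone sketch of the same bit trick, assuming a 16-byte group for illustration (a hypothetical helper, not part of the crate's API):

// Hypothetical helper, not part of hashbrown: split `len` remaining slots
// (a multiple of GROUP_WIDTH) into head and tail halves, rounding the
// midpoint down to a group boundary so the tail is at least one group long.
const GROUP_WIDTH: usize = 16; // assumption: SSE2-sized groups

fn split_at_group_boundary(len: usize) -> (usize, usize) {
    debug_assert_eq!(len % GROUP_WIDTH, 0);
    let mid = (len / 2) & !(GROUP_WIDTH - 1);
    (mid, len - mid)
}

fn main() {
    // Three groups of 16 slots: the head keeps one group, the tail gets two.
    assert_eq!(split_at_group_boundary(48), (16, 32));
}
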
3460
    /// # Safety
3461
    /// If `DO_CHECK_PTR_RANGE` is false, the caller must ensure that we never try to iterate
3462
    /// after yielding all elements.
3463
    #[cfg_attr(feature = "inline-more", inline)]
3464
0
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3465
        loop {
3466
0
            if let Some(index) = self.current_group.next() {
3467
0
                return Some(self.data.next_n(index));
3468
0
            }
3469
3470
0
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3471
0
                return None;
3472
0
            }
3473
3474
            // We might read past self.end up to the next group boundary,
3475
            // but this is fine because it only occurs on tables smaller
3476
            // than the group size where the trailing control bytes are all
3477
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3478
            // to the group size (since tables are power-of-two sized).
3479
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3480
0
                .match_full()
3481
0
                .into_iter();
3482
0
            self.data = self.data.next_n(Group::WIDTH);
3483
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3484
        }
3485
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::next_impl::<false>
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::next_impl::<false>
Unexecuted instantiation: <hashbrown::raw::RawIterRange<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::next_impl::<false>
Unexecuted instantiation: <hashbrown::raw::RawIterRange<usize>>::next_impl::<false>
Unexecuted instantiation: <hashbrown::raw::RawIterRange<_>>::next_impl::<_>
3486
3487
    /// Folds every element into an accumulator by applying an operation,
3488
    /// returning the final result.
3489
    ///
3490
    /// `fold_impl()` takes three arguments: the number of items remaining in
3491
    /// the iterator, an initial value, and a closure with two arguments: an
3492
    /// 'accumulator', and an element. The closure returns the value that the
3493
    /// accumulator should have for the next iteration.
3494
    ///
3495
    /// The initial value is the value the accumulator will have on the first call.
3496
    ///
3497
    /// After applying this closure to every element of the iterator, `fold_impl()`
3498
    /// returns the accumulator.
3499
    ///
3500
    /// # Safety
3501
    ///
3502
    /// If any of the following conditions are violated, the result is
3503
    /// [`Undefined Behavior`]:
3504
    ///
3505
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3506
    ///   i.e. table outlives the `RawIterRange`;
3507
    ///
3508
    /// * The provided `n` value must match the actual number of items
3509
    ///   in the table.
3510
    ///
3511
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3512
    #[allow(clippy::while_let_on_iterator)]
3513
    #[cfg_attr(feature = "inline-more", inline)]
3514
0
    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3515
0
    where
3516
0
        F: FnMut(B, Bucket<T>) -> B,
3517
    {
3518
        loop {
3519
0
            while let Some(index) = self.current_group.next() {
3520
                // The returned `index` will always be in the range `0..Group::WIDTH`,
3521
                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3522
0
                debug_assert!(n != 0);
3523
0
                let bucket = self.data.next_n(index);
3524
0
                acc = f(acc, bucket);
3525
0
                n -= 1;
3526
            }
3527
3528
0
            if n == 0 {
3529
0
                return acc;
3530
0
            }
3531
3532
            // SAFETY: The caller of this function ensures that:
3533
            //
3534
            // 1. The provided `n` value matches the actual number of items in the table;
3535
            // 2. The table is alive and has not moved.
3536
            //
3537
            // Taking the above into account, we always stay within the bounds, because:
3538
            //
3539
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3540
            //    we will never end up in the given branch, since we should have already
3541
            //    yielded all the elements of the table.
3542
            //
3543
            // 2. For tables larger than the group width. The number of buckets is a
3544
            //    power of two (2 ^ n), Group::WIDTH is also a power of two (2 ^ k). Since
3545
            //    `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3546
            //    start of the array of control bytes, and never try to iterate after
3547
            //    getting all the elements, the last `self.current_group` will read bytes
3548
            //    from the `self.buckets() - Group::WIDTH` index.  We know also that
3549
            //    `self.current_group.next()` will always return indices within the range
3550
            //    `0..Group::WIDTH`.
3551
            //
3552
            //    Knowing all of the above and taking into account that we are synchronizing
3553
            //    the `self.data` index with the index we used to read the `self.current_group`,
3554
            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3555
            //    an index number less than `self.buckets()`.
3556
            //
3557
            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3558
            //    actually be read, since we should have already yielded all the elements of
3559
            //    the table.
3560
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3561
0
                .match_full()
3562
0
                .into_iter();
3563
0
            self.data = self.data.next_n(Group::WIDTH);
3564
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3565
        }
3566
0
    }
3567
}
3568
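
For orientation, this fold path exists to serve ordinary folds over the map's items. A small usage sketch against the public `hashbrown::HashMap` API (whether a given call is routed through this specialized `fold_impl` depends on how the public iterators forward `fold`, so treat the connection as approximate):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<&str, u64> = HashMap::new();
    map.insert("a", 1);
    map.insert("b", 2);
    map.insert("c", 3);

    // Folding over the iterator visits every full bucket exactly once.
    let sum = map.iter().fold(0u64, |acc, (_, v)| acc + v);
    assert_eq!(sum, 6);
}
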
3569
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3570
// in the actual iterator implementations determine the real Send/Sync bounds.
3571
unsafe impl<T> Send for RawIterRange<T> {}
3572
unsafe impl<T> Sync for RawIterRange<T> {}
3573
3574
impl<T> Clone for RawIterRange<T> {
3575
    #[cfg_attr(feature = "inline-more", inline)]
3576
0
    fn clone(&self) -> Self {
3577
0
        Self {
3578
0
            data: self.data.clone(),
3579
0
            next_ctrl: self.next_ctrl,
3580
0
            current_group: self.current_group.clone(),
3581
0
            end: self.end,
3582
0
        }
3583
0
    }
3584
}
3585
3586
impl<T> Iterator for RawIterRange<T> {
3587
    type Item = Bucket<T>;
3588
3589
    #[cfg_attr(feature = "inline-more", inline)]
3590
0
    fn next(&mut self) -> Option<Bucket<T>> {
3591
        unsafe {
3592
            // SAFETY: We set the checker flag to true.
3593
0
            self.next_impl::<true>()
3594
        }
3595
0
    }
3596
3597
    #[inline]
3598
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3599
        // We don't have an item count, so just guess based on the range size.
3600
0
        let remaining_buckets = if self.end > self.next_ctrl {
3601
0
            unsafe { offset_from(self.end, self.next_ctrl) }
3602
        } else {
3603
0
            0
3604
        };
3605
3606
        // Add a group width to include the group we are currently processing.
3607
0
        (0, Some(Group::WIDTH + remaining_buckets))
3608
0
    }
3609
}
3610
3611
impl<T> FusedIterator for RawIterRange<T> {}
3612
3613
/// Iterator which returns a raw pointer to every full bucket in the table.
3614
///
3615
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3616
/// must observe several rules when using it:
3617
/// - You must not free the hash table while iterating (including via growing/shrinking).
3618
/// - It is fine to erase a bucket that has been yielded by the iterator.
3619
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3620
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3621
/// - It is unspecified whether an element inserted after the iterator was
3622
///   created will be yielded by that iterator (unless `reflect_insert` is called).
3623
/// - The order in which the iterator yields buckets is unspecified and may
3624
///   change in the future.
3625
pub struct RawIter<T> {
3626
    pub(crate) iter: RawIterRange<T>,
3627
    items: usize,
3628
}
3629
3630
impl<T> RawIter<T> {
3631
0
    unsafe fn drop_elements(&mut self) {
3632
0
        if T::NEEDS_DROP && self.items != 0 {
3633
0
            for item in self {
3634
0
                item.drop();
3635
0
            }
3636
0
        }
3637
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIter<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)>>::drop_elements
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)>>::drop_elements
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))>>::drop_elements
Unexecuted instantiation: <hashbrown::raw::RawIter<_>>::drop_elements
3638
}
3639
3640
impl<T> Clone for RawIter<T> {
3641
    #[cfg_attr(feature = "inline-more", inline)]
3642
0
    fn clone(&self) -> Self {
3643
0
        Self {
3644
0
            iter: self.iter.clone(),
3645
0
            items: self.items,
3646
0
        }
3647
0
    }
3648
}
3649
impl<T> Default for RawIter<T> {
3650
    #[cfg_attr(feature = "inline-more", inline)]
3651
0
    fn default() -> Self {
3652
        // SAFETY: Because the table is static, it always outlives the iter.
3653
0
        unsafe { RawTableInner::NEW.iter() }
3654
0
    }
3655
}
3656
3657
impl<T> Iterator for RawIter<T> {
3658
    type Item = Bucket<T>;
3659
3660
    #[cfg_attr(feature = "inline-more", inline)]
3661
0
    fn next(&mut self) -> Option<Bucket<T>> {
3662
        // The inner iterator iterates over buckets,
3663
        // so it can do unnecessary work if we have already yielded all items.
3664
0
        if self.items == 0 {
3665
0
            return None;
3666
0
        }
3667
3668
0
        let nxt = unsafe {
3669
            // SAFETY: We check number of items to yield using `items` field.
3670
0
            self.iter.next_impl::<false>()
3671
        };
3672
3673
0
        debug_assert!(nxt.is_some());
3674
0
        self.items -= 1;
3675
3676
0
        nxt
3677
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIter<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIter<usize> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIter<_> as core::iter::traits::iterator::Iterator>::next
3678
3679
    #[inline]
3680
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3681
0
        (self.items, Some(self.items))
3682
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::iter::traits::iterator::Iterator>::size_hint
Unexecuted instantiation: <hashbrown::raw::RawIter<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::iter::traits::iterator::Iterator>::size_hint
Unexecuted instantiation: <hashbrown::raw::RawIter<_> as core::iter::traits::iterator::Iterator>::size_hint
3683
3684
    #[inline]
3685
0
    fn fold<B, F>(self, init: B, f: F) -> B
3686
0
    where
3687
0
        Self: Sized,
3688
0
        F: FnMut(B, Self::Item) -> B,
3689
    {
3690
0
        unsafe { self.iter.fold_impl(self.items, init, f) }
3691
0
    }
3692
}
3693
3694
impl<T> ExactSizeIterator for RawIter<T> {}
3695
impl<T> FusedIterator for RawIter<T> {}
3696
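
Because `RawIter` carries an exact `items` count, the public iterators built on top of it can report a precise `size_hint`. A small usage sketch (assuming the crate's default hasher feature):

use hashbrown::HashMap;

fn main() {
    let map: HashMap<u8, u8> = (0..5).map(|i| (i, i)).collect();

    // The hint is exact: lower and upper bounds both equal the item count.
    let iter = map.iter();
    assert_eq!(iter.size_hint(), (5, Some(5)));
    assert_eq!(iter.count(), 5);
}
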
3697
/// Iterator which returns an index of every full bucket in the table.
3698
///
3699
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3700
/// must observe several rules when using it:
3701
/// - You must not free the hash table while iterating (including via growing/shrinking).
3702
/// - It is fine to erase a bucket that has been yielded by the iterator.
3703
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3704
///   result in the iterator yielding index of that bucket.
3705
/// - It is unspecified whether an element inserted after the iterator was
3706
///   created will be yielded by that iterator.
3707
/// - The order in which the iterator yields indices of the buckets is unspecified
3708
///   and may change in the future.
3709
pub(crate) struct FullBucketsIndices {
3710
    // Mask of full buckets in the current group. Bits are cleared from this
3711
    // mask as each element is processed.
3712
    current_group: BitMaskIter,
3713
3714
    // Initial value of the bytes' indices of the current group (relative
3715
    // to the start of the control bytes).
3716
    group_first_index: usize,
3717
3718
    // Pointer to the current group of control bytes.
3719
    // Must be aligned to the group size (Group::WIDTH).
3720
    ctrl: NonNull<u8>,
3721
3722
    // Number of elements in the table.
3723
    items: usize,
3724
}
3725
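
A standalone sketch of the idea behind this iterator: walk the control bytes and yield the indices whose byte marks a full bucket. The byte-by-byte scan and the constants below are simplifications of the real group-wide SIMD match, not the crate's code:

// Simplified model: a full control byte stores the 7-bit tag of the hash,
// so its high bit is 0; EMPTY (0xFF) and DELETED (0x80) have the high bit set.
const EMPTY: u8 = 0xFF;

fn full_bucket_indices(ctrl: &[u8]) -> impl Iterator<Item = usize> + '_ {
    ctrl.iter()
        .enumerate()
        .filter(|&(_, &byte)| (byte & 0x80) == 0)
        .map(|(i, _)| i)
}

fn main() {
    let ctrl = [EMPTY, 0x12, EMPTY, 0x05];
    let indices: Vec<usize> = full_bucket_indices(&ctrl).collect();
    assert_eq!(indices, vec![1, 3]);
}
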
3726
impl FullBucketsIndices {
3727
    /// Advances the iterator and returns the next value.
3728
    ///
3729
    /// # Safety
3730
    ///
3731
    /// If any of the following conditions are violated, the result is
3732
    /// [`Undefined Behavior`]:
3733
    ///
3734
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3735
    ///   i.e. table outlives the `FullBucketsIndices`;
3736
    ///
3737
    /// * It never tries to iterate after getting all elements.
3738
    ///
3739
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3740
    #[inline(always)]
3741
0
    unsafe fn next_impl(&mut self) -> Option<usize> {
3742
        loop {
3743
0
            if let Some(index) = self.current_group.next() {
3744
                // The returned `self.group_first_index + index` will always
3745
                // be in the range `0..self.buckets()`. See explanation below.
3746
0
                return Some(self.group_first_index + index);
3747
0
            }
3748
3749
            // SAFETY: The caller of this function ensures that:
3750
            //
3751
            // 1. It never tries to iterate after getting all the elements;
3752
            // 2. The table is alive and has not moved;
3753
            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3754
            //
3755
            // Taking the above into account, we always stay within the bounds, because:
3756
            //
3757
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3758
            //    we will never end up in the given branch, since we should have already
3759
            //    yielded all the elements of the table.
3760
            //
3761
            // 2. For tables larger than the group width. The number of buckets is a
3762
            //    power of two (2 ^ n), Group::WIDTH is also a power of two (2 ^ k). Since
3763
            //    `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3764
            //    start of the array of control bytes, and never try to iterate after
3765
            //    getting all the elements, the last `self.ctrl` will be equal to
3766
            //    the `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
3767
            //    will always return indices within the range `0..Group::WIDTH`,
3768
            //    and subsequent `self.group_first_index + index` will always return a
3769
            //    number less than `self.buckets()`.
3770
0
            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3771
3772
            // SAFETY: See explanation above.
3773
0
            self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast())
3774
0
                .match_full()
3775
0
                .into_iter();
3776
0
            self.group_first_index += Group::WIDTH;
3777
        }
3778
0
    }
3779
}
3780
3781
impl Iterator for FullBucketsIndices {
3782
    type Item = usize;
3783
3784
    /// Advances the iterator and returns the next value. It is up to
3785
    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3786
    /// because we cannot make the `next` method unsafe.
3787
    #[inline(always)]
3788
0
    fn next(&mut self) -> Option<usize> {
3789
        // Return if we already yielded all items.
3790
0
        if self.items == 0 {
3791
0
            return None;
3792
0
        }
3793
3794
0
        let nxt = unsafe {
3795
            // SAFETY:
3796
            // 1. We check number of items to yield using `items` field.
3797
            // 2. The caller ensures that the table is alive and has not moved.
3798
0
            self.next_impl()
3799
        };
3800
3801
0
        debug_assert!(nxt.is_some());
3802
0
        self.items -= 1;
3803
3804
0
        nxt
3805
0
    }
3806
3807
    #[inline(always)]
3808
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3809
0
        (self.items, Some(self.items))
3810
0
    }
3811
}
3812
3813
impl ExactSizeIterator for FullBucketsIndices {}
3814
impl FusedIterator for FullBucketsIndices {}
3815
3816
/// Iterator which consumes a table and returns elements.
3817
pub struct RawIntoIter<T, A: Allocator = Global> {
3818
    iter: RawIter<T>,
3819
    allocation: Option<(NonNull<u8>, Layout, A)>,
3820
    marker: PhantomData<T>,
3821
}
3822
3823
impl<T, A: Allocator> RawIntoIter<T, A> {
3824
    #[cfg_attr(feature = "inline-more", inline)]
3825
0
    pub fn iter(&self) -> RawIter<T> {
3826
0
        self.iter.clone()
3827
0
    }
3828
}
3829
3830
unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3831
where
3832
    T: Send,
3833
    A: Send,
3834
{
3835
}
3836
unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3837
where
3838
    T: Sync,
3839
    A: Sync,
3840
{
3841
}
3842
3843
#[cfg(feature = "nightly")]
3844
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3845
    #[cfg_attr(feature = "inline-more", inline)]
3846
    fn drop(&mut self) {
3847
        unsafe {
3848
            // Drop all remaining elements
3849
            self.iter.drop_elements();
3850
3851
            // Free the table
3852
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3853
                alloc.deallocate(ptr, layout);
3854
            }
3855
        }
3856
    }
3857
}
3858
#[cfg(not(feature = "nightly"))]
3859
impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3860
    #[cfg_attr(feature = "inline-more", inline)]
3861
0
    fn drop(&mut self) {
3862
        unsafe {
3863
            // Drop all remaining elements
3864
0
            self.iter.drop_elements();
3865
3866
            // Free the table
3867
0
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3868
0
                alloc.deallocate(ptr, layout);
3869
0
            }
3870
        }
3871
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<_, _> as core::ops::drop::Drop>::drop
3872
}
3873
3874
impl<T, A: Allocator> Default for RawIntoIter<T, A> {
3875
0
    fn default() -> Self {
3876
0
        Self {
3877
0
            iter: Default::default(),
3878
0
            allocation: None,
3879
0
            marker: PhantomData,
3880
0
        }
3881
0
    }
3882
}
3883
impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3884
    type Item = T;
3885
3886
    #[cfg_attr(feature = "inline-more", inline)]
3887
0
    fn next(&mut self) -> Option<T> {
3888
0
        unsafe { Some(self.iter.next()?.read()) }
3889
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<(ztunnel::inpod::WorkloadUid, (ztunnel::state::WorkloadInfo, ztunnel::inpod::netns::InpodNetns))> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<_, _> as core::iter::traits::iterator::Iterator>::next
3890
3891
    #[inline]
3892
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3893
0
        self.iter.size_hint()
3894
0
    }
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState)> as core::iter::traits::iterator::Iterator>::size_hint
Unexecuted instantiation: <hashbrown::raw::RawIntoIter<_, _> as core::iter::traits::iterator::Iterator>::size_hint
3895
}
3896
3897
impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3898
impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3899
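
The public counterpart of this consuming iterator is plain `into_iter()` on the map. A small usage sketch (assuming the default hasher feature):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("one".to_string(), 1);
    map.insert("two".to_string(), 2);

    // Each (key, value) pair is moved out of the table; the allocation is
    // freed once the consuming iterator itself is dropped.
    let total: u32 = map.into_iter().map(|(_, v)| v).sum();
    assert_eq!(total, 3);
}
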
3900
/// Iterator which consumes elements without freeing the table storage.
3901
pub struct RawDrain<'a, T, A: Allocator = Global> {
3902
    iter: RawIter<T>,
3903
3904
    // The table is moved into the iterator for the duration of the drain. This
3905
    // ensures that an empty table is left if the drain iterator is leaked
3906
    // without dropping.
3907
    table: RawTableInner,
3908
    orig_table: NonNull<RawTableInner>,
3909
3910
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3911
    // covariant over T.
3912
    marker: PhantomData<&'a RawTable<T, A>>,
3913
}
3914
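
The field comments above describe the leak-safety trick: the inner table is moved into the drain, so a leaked drain leaves the original map empty rather than pointing at stale elements. Through the public API this surfaces as `drain()`, sketched below (drain is documented to keep the allocation for reuse):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::new();
    for i in 0..4 {
        map.insert(i, i * 10);
    }

    // Drain moves every element out but keeps the allocation for reuse.
    let drained: Vec<(u32, u32)> = map.drain().collect();
    assert_eq!(drained.len(), 4);
    assert!(map.is_empty());
    assert!(map.capacity() >= 4);
}
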
3915
impl<T, A: Allocator> RawDrain<'_, T, A> {
3916
    #[cfg_attr(feature = "inline-more", inline)]
3917
0
    pub fn iter(&self) -> RawIter<T> {
3918
0
        self.iter.clone()
3919
0
    }
3920
}
3921
3922
unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3923
where
3924
    T: Send,
3925
    A: Send,
3926
{
3927
}
3928
unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3929
where
3930
    T: Sync,
3931
    A: Sync,
3932
{
3933
}
3934
3935
impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3936
    #[cfg_attr(feature = "inline-more", inline)]
3937
0
    fn drop(&mut self) {
3938
0
        unsafe {
3939
0
            // Drop all remaining elements. Note that this may panic.
3940
0
            self.iter.drop_elements();
3941
0
3942
0
            // Reset the contents of the table now that all elements have been
3943
0
            // dropped.
3944
0
            self.table.clear_no_drop();
3945
0
3946
0
            // Move the now empty table back to its original location.
3947
0
            self.orig_table
3948
0
                .as_ptr()
3949
0
                .copy_from_nonoverlapping(&self.table, 1);
3950
0
        }
3951
0
    }
Unexecuted instantiation: <hashbrown::raw::RawDrain<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawDrain<_, _> as core::ops::drop::Drop>::drop
3952
}
3953
3954
impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3955
    type Item = T;
3956
3957
    #[cfg_attr(feature = "inline-more", inline)]
3958
0
    fn next(&mut self) -> Option<T> {
3959
        unsafe {
3960
0
            let item = self.iter.next()?;
3961
0
            Some(item.read())
3962
        }
3963
0
    }
Unexecuted instantiation: <hashbrown::raw::RawDrain<(lru::KeyRef<i32>, core::ptr::non_null::NonNull<lru::LruEntry<i32, pingora_pool::lru::Node<pingora_pool::connection::ConnectionMeta>>>)> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::RawDrain<_, _> as core::iter::traits::iterator::Iterator>::next
3964
3965
    #[inline]
3966
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3967
0
        self.iter.size_hint()
3968
0
    }
3969
}
3970
3971
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
3972
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
3973
3974
/// Iterator over occupied buckets that could match a given hash.
3975
///
3976
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
3977
/// items that have a hash value different than the one provided. You should
3978
/// always validate the returned values before using them.
3979
///
3980
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3981
/// must observe several rules when using it:
3982
/// - You must not free the hash table while iterating (including via growing/shrinking).
3983
/// - It is fine to erase a bucket that has been yielded by the iterator.
3984
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3985
///   result in the iterator yielding that bucket.
3986
/// - It is unspecified whether an element inserted after the iterator was
3987
///   created will be yielded by that iterator.
3988
/// - The order in which the iterator yields buckets is unspecified and may
3989
///   change in the future.
3990
pub struct RawIterHash<T> {
3991
    inner: RawIterHashInner,
3992
    _marker: PhantomData<T>,
3993
}
3994
3995
#[derive(Clone)]
3996
struct RawIterHashInner {
3997
    // See `RawTableInner`'s corresponding fields for details.
3998
    // We can't store a `*const RawTableInner` as it would get
3999
    // invalidated by the user calling `&mut` methods on `RawTable`.
4000
    bucket_mask: usize,
4001
    ctrl: NonNull<u8>,
4002
4003
    // The top 7 bits of the hash.
4004
    tag_hash: Tag,
4005
4006
    // The sequence of groups to probe in the search.
4007
    probe_seq: ProbeSeq,
4008
4009
    group: Group,
4010
4011
    // The elements within the group with a matching tag-hash.
4012
    bitmask: BitMaskIter,
4013
}
4014
4015
impl<T> RawIterHash<T> {
4016
    #[cfg_attr(feature = "inline-more", inline)]
4017
0
    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4018
0
        RawIterHash {
4019
0
            inner: RawIterHashInner::new(&table.table, hash),
4020
0
            _marker: PhantomData,
4021
0
        }
4022
0
    }
4023
}
4024
4025
impl<T> Clone for RawIterHash<T> {
4026
    #[cfg_attr(feature = "inline-more", inline)]
4027
0
    fn clone(&self) -> Self {
4028
0
        Self {
4029
0
            inner: self.inner.clone(),
4030
0
            _marker: PhantomData,
4031
0
        }
4032
0
    }
4033
}
4034
4035
impl<T> Default for RawIterHash<T> {
4036
    #[cfg_attr(feature = "inline-more", inline)]
4037
0
    fn default() -> Self {
4038
0
        Self {
4039
0
            // SAFETY: Because the table is static, it always outlives the iter.
4040
0
            inner: unsafe { RawIterHashInner::new(&RawTableInner::NEW, 0) },
4041
0
            _marker: PhantomData,
4042
0
        }
4043
0
    }
4044
}
4045
4046
impl RawIterHashInner {
4047
    #[cfg_attr(feature = "inline-more", inline)]
4048
0
    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4049
0
        let tag_hash = Tag::full(hash);
4050
0
        let probe_seq = table.probe_seq(hash);
4051
0
        let group = Group::load(table.ctrl(probe_seq.pos));
4052
0
        let bitmask = group.match_tag(tag_hash).into_iter();
4053
4054
0
        RawIterHashInner {
4055
0
            bucket_mask: table.bucket_mask,
4056
0
            ctrl: table.ctrl,
4057
0
            tag_hash,
4058
0
            probe_seq,
4059
0
            group,
4060
0
            bitmask,
4061
0
        }
4062
0
    }
4063
}
4064
4065
impl<T> Iterator for RawIterHash<T> {
4066
    type Item = Bucket<T>;
4067
4068
0
    fn next(&mut self) -> Option<Bucket<T>> {
4069
        unsafe {
4070
0
            match self.inner.next() {
4071
0
                Some(index) => {
4072
                    // Can't use `RawTable::bucket` here as we don't have
4073
                    // an actual `RawTable` reference to use.
4074
0
                    debug_assert!(index <= self.inner.bucket_mask);
4075
0
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4076
0
                    Some(bucket)
4077
                }
4078
0
                None => None,
4079
            }
4080
        }
4081
0
    }
4082
}
4083
4084
impl Iterator for RawIterHashInner {
4085
    type Item = usize;
4086
4087
0
    fn next(&mut self) -> Option<Self::Item> {
4088
        unsafe {
4089
            loop {
4090
0
                if let Some(bit) = self.bitmask.next() {
4091
0
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4092
0
                    return Some(index);
4093
0
                }
4094
0
                if likely(self.group.match_empty().any_bit_set()) {
4095
0
                    return None;
4096
0
                }
4097
0
                self.probe_seq.move_next(self.bucket_mask);
4098
4099
                // Can't use `RawTableInner::ctrl` here as we don't have
4100
                // an actual `RawTableInner` reference to use.
4101
0
                let index = self.probe_seq.pos;
4102
0
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4103
0
                let group_ctrl = self.ctrl.as_ptr().add(index).cast();
4104
4105
0
                self.group = Group::load(group_ctrl);
4106
0
                self.bitmask = self.group.match_tag(self.tag_hash).into_iter();
4107
            }
4108
        }
4109
0
    }
4110
}
4111
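
The `probe_seq.move_next` calls above walk the table in a quadratic (triangular) sequence of group-aligned positions, wrapped by the power-of-two bucket mask. A standalone sketch of that sequence, with the group width as an assumed constant:

// Standalone model of the probe sequence: the stride grows by one group each
// step, so positions advance by 1, 2, 3, ... groups modulo the table size.
const GROUP_WIDTH: usize = 16; // assumption for the demo

fn probe_positions(start: usize, bucket_mask: usize, steps: usize) -> Vec<usize> {
    let mut pos = start & bucket_mask;
    let mut stride = 0;
    let mut out = Vec::with_capacity(steps);
    for _ in 0..steps {
        out.push(pos);
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
    out
}

fn main() {
    // A 64-bucket table (mask 63): the walk wraps but keeps group alignment.
    assert_eq!(probe_positions(0, 63, 4), vec![0, 16, 48, 32]);
}
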
4112
pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4113
    pub iter: RawIter<T>,
4114
    pub table: &'a mut RawTable<T, A>,
4115
}
4116
4117
impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4118
    #[cfg_attr(feature = "inline-more", inline)]
4119
0
    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4120
0
    where
4121
0
        F: FnMut(&mut T) -> bool,
4122
    {
4123
        unsafe {
4124
0
            for item in &mut self.iter {
4125
0
                if f(item.as_mut()) {
4126
0
                    return Some(self.table.remove(item).0);
4127
0
                }
4128
            }
4129
        }
4130
0
        None
4131
0
    }
Unexecuted instantiation: <hashbrown::raw::RawExtractIf<(ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState), allocator_api2::stable::alloc::global::Global>>::next::<<hashbrown::map::ExtractIf<ztunnel::inpod::WorkloadUid, ztunnel::inpod::statemanager::WorkloadState, <ztunnel::inpod::statemanager::WorkloadProxyManagerState>::reconcile::{closure#0}> as core::iter::traits::iterator::Iterator>::next::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawExtractIf<_, _>>::next::<_>
4132
}
4133
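
The `ExtractIf` instantiation recorded above comes from the public `extract_if` entry point, which drives this `next` in a loop. A small usage sketch (assuming the default hasher feature):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = (0..8).map(|i| (i, i)).collect();

    // Entries for which the predicate returns true are removed and yielded;
    // when the iterator is fully consumed, everything else stays in the map.
    let evens: Vec<(u32, u32)> = map.extract_if(|k, _v| k % 2 == 0).collect();

    assert_eq!(evens.len(), 4);
    assert_eq!(map.len(), 4);
}
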
4134
#[cfg(test)]
4135
mod test_map {
4136
    use super::*;
4137
4138
    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4139
        unsafe {
4140
            table.table.rehash_in_place(
4141
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4142
                mem::size_of::<T>(),
4143
                if mem::needs_drop::<T>() {
4144
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
4145
                } else {
4146
                    None
4147
                },
4148
            );
4149
        }
4150
    }
4151
4152
    #[test]
4153
    fn rehash() {
4154
        let mut table = RawTable::new();
4155
        let hasher = |i: &u64| *i;
4156
        for i in 0..100 {
4157
            table.insert(i, i, hasher);
4158
        }
4159
4160
        for i in 0..100 {
4161
            unsafe {
4162
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4163
            }
4164
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4165
        }
4166
4167
        rehash_in_place(&mut table, hasher);
4168
4169
        for i in 0..100 {
4170
            unsafe {
4171
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4172
            }
4173
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4174
        }
4175
    }
4176
4177
    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4178
    /// AN UNINITIALIZED TABLE DURING THE DROP
4179
    #[test]
4180
    fn test_drop_uninitialized() {
4181
        use ::alloc::vec::Vec;
4182
4183
        let table = unsafe {
4184
            // SAFETY: The `buckets` is power of two and we're not
4185
            // trying to actually use the returned RawTable.
4186
            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4187
                .unwrap()
4188
        };
4189
        drop(table);
4190
    }
4191
4192
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4193
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4194
    #[test]
4195
    fn test_drop_zero_items() {
4196
        use ::alloc::vec::Vec;
4197
        unsafe {
4198
            // SAFETY: The `buckets` is power of two and we're not
4199
            // trying to actually use the returned RawTable.
4200
            let mut table =
4201
                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4202
                    .unwrap();
4203
4204
            // WE SIMULATE, AS IT WERE, A FULL TABLE.
4205
4206
            // SAFETY: We checked that the table is allocated and therefore the table already has
4207
            // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
4208
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4209
            table.table.ctrl_slice().fill_empty();
4210
4211
            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4212
            table.table.ctrl(0).write_bytes(0, table.capacity());
4213
4214
            // Fix up the trailing control bytes. See the comments in set_ctrl
4215
            // for the handling of tables smaller than the group width.
4216
            if table.buckets() < Group::WIDTH {
4217
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
4218
                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4219
                // `Group::WIDTH` is safe
4220
                table
4221
                    .table
4222
                    .ctrl(0)
4223
                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4224
            } else {
4225
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
4226
                // control bytes, so copying `Group::WIDTH` bytes with offset equal
4227
                // to `self.buckets() == self.bucket_mask + 1` is safe
4228
                table
4229
                    .table
4230
                    .ctrl(0)
4231
                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4232
            }
4233
            drop(table);
4234
        }
4235
    }
4236
4237
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4238
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4239
    #[test]
4240
    fn test_catch_panic_clone_from() {
4241
        use ::alloc::sync::Arc;
4242
        use ::alloc::vec::Vec;
4243
        use allocator_api2::alloc::{AllocError, Allocator, Global};
4244
        use core::sync::atomic::{AtomicI8, Ordering};
4245
        use std::thread;
4246
4247
        struct MyAllocInner {
4248
            drop_count: Arc<AtomicI8>,
4249
        }
4250
4251
        #[derive(Clone)]
4252
        struct MyAlloc {
4253
            _inner: Arc<MyAllocInner>,
4254
        }
4255
4256
        impl Drop for MyAllocInner {
4257
            fn drop(&mut self) {
4258
                println!("MyAlloc freed.");
4259
                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4260
            }
4261
        }
4262
4263
        unsafe impl Allocator for MyAlloc {
4264
            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4265
                let g = Global;
4266
                g.allocate(layout)
4267
            }
4268
4269
            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4270
                let g = Global;
4271
                g.deallocate(ptr, layout)
4272
            }
4273
        }
4274
4275
        const DISARMED: bool = false;
4276
        const ARMED: bool = true;
4277
4278
        struct CheckedCloneDrop {
4279
            panic_in_clone: bool,
4280
            dropped: bool,
4281
            need_drop: Vec<u64>,
4282
        }
4283
4284
        impl Clone for CheckedCloneDrop {
4285
            fn clone(&self) -> Self {
4286
                if self.panic_in_clone {
4287
                    panic!("panic in clone")
4288
                }
4289
                Self {
4290
                    panic_in_clone: self.panic_in_clone,
4291
                    dropped: self.dropped,
4292
                    need_drop: self.need_drop.clone(),
4293
                }
4294
            }
4295
        }
4296
4297
        impl Drop for CheckedCloneDrop {
4298
            fn drop(&mut self) {
4299
                if self.dropped {
4300
                    panic!("double drop");
4301
                }
4302
                self.dropped = true;
4303
            }
4304
        }
4305
4306
        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4307
4308
        let mut table = RawTable::new_in(MyAlloc {
4309
            _inner: Arc::new(MyAllocInner {
4310
                drop_count: dropped.clone(),
4311
            }),
4312
        });
4313
4314
        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4315
            let idx = idx as u64;
4316
            table.insert(
4317
                idx,
4318
                (
4319
                    idx,
4320
                    CheckedCloneDrop {
4321
                        panic_in_clone,
4322
                        dropped: false,
4323
                        need_drop: vec![idx],
4324
                    },
4325
                ),
4326
                |(k, _)| *k,
4327
            );
4328
        }
4329
4330
        assert_eq!(table.len(), 7);
4331
4332
        thread::scope(|s| {
4333
            let result = s.spawn(|| {
4334
                let armed_flags = [
4335
                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4336
                ];
4337
                let mut scope_table = RawTable::new_in(MyAlloc {
4338
                    _inner: Arc::new(MyAllocInner {
4339
                        drop_count: dropped.clone(),
4340
                    }),
4341
                });
4342
                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4343
                    let idx = idx as u64;
4344
                    scope_table.insert(
4345
                        idx,
4346
                        (
4347
                            idx,
4348
                            CheckedCloneDrop {
4349
                                panic_in_clone,
4350
                                dropped: false,
4351
                                need_drop: vec![idx + 100],
4352
                            },
4353
                        ),
4354
                        |(k, _)| *k,
4355
                    );
4356
                }
4357
                table.clone_from(&scope_table);
4358
            });
4359
            assert!(result.join().is_err());
4360
        });
4361
4362
        // Let's check that all iterators work fine and do not return elements
4363
        // (especially `RawIterRange`, which does not depend on the number of
4364
        // elements in the table, but looks directly at the control bytes)
4365
        //
4366
        // SAFETY: We know for sure that `RawTable` will outlive
4367
        // the returned `RawIter / RawIterRange` iterator.
4368
        assert_eq!(table.len(), 0);
4369
        assert_eq!(unsafe { table.iter().count() }, 0);
4370
        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4371
4372
        for idx in 0..table.buckets() {
4373
            let idx = idx as u64;
4374
            assert!(
4375
                table.find(idx, |(k, _)| *k == idx).is_none(),
4376
                "Index: {idx}"
4377
            );
4378
        }
4379
4380
        // All allocator clones should already be dropped.
4381
        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4382
    }
4383
}