Coverage Report

Created: 2025-07-23 07:29

/rust/registry/src/index.crates.io-6f17d22bba15001f/hashbrown-0.15.3/src/raw/mod.rs
Line
Count
Source
1
use crate::alloc::alloc::{handle_alloc_error, Layout};
2
use crate::control::{BitMaskIter, Group, Tag, TagSliceExt};
3
use crate::scopeguard::{guard, ScopeGuard};
4
use crate::util::{invalid_mut, likely, unlikely};
5
use crate::TryReserveError;
6
use core::array;
7
use core::iter::FusedIterator;
8
use core::marker::PhantomData;
9
use core::mem;
10
use core::ptr::NonNull;
11
use core::slice;
12
use core::{hint, ptr};
13
14
mod alloc;
15
#[cfg(test)]
16
pub(crate) use self::alloc::AllocError;
17
pub(crate) use self::alloc::{do_alloc, Allocator, Global};
18
19
#[inline]
20
337k
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
21
337k
    to.offset_from(from) as usize
22
337k
}
Per-instantiation counts for hashbrown::raw::offset_from (lines 20-22):
hashbrown::raw::offset_from::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>: 23.4k
hashbrown::raw::offset_from::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>: 179k
hashbrown::raw::offset_from::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>: 113k
hashbrown::raw::offset_from::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>: 15.9k
hashbrown::raw::offset_from::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>: 5.27k
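
As a side note on what the helper above computes: the distance is in units of T, not bytes. A minimal standalone sketch (not part of the report, using the stable `<*const T>::offset_from` directly) that demonstrates the same arithmetic:

fn main() {
    let data = [10u64, 20, 30, 40];
    let from: *const u64 = &data[0];
    let to: *const u64 = &data[3];
    // Same computation as the helper above: pointer difference in units of T.
    let distance = unsafe { to.offset_from(from) } as usize;
    assert_eq!(distance, 3);
    println!("distance = {distance}");
}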
23
24
/// Whether memory allocation errors should return an error or abort.
25
#[derive(Copy, Clone)]
26
enum Fallibility {
27
    Fallible,
28
    Infallible,
29
}
30
31
impl Fallibility {
32
    /// Error to return on capacity overflow.
33
    #[cfg_attr(feature = "inline-more", inline)]
34
0
    fn capacity_overflow(self) -> TryReserveError {
35
0
        match self {
36
0
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
37
0
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
38
        }
39
0
    }
40
41
    /// Error to return on allocation error.
42
    #[cfg_attr(feature = "inline-more", inline)]
43
0
    fn alloc_err(self, layout: Layout) -> TryReserveError {
44
0
        match self {
45
0
            Fallibility::Fallible => TryReserveError::AllocError { layout },
46
0
            Fallibility::Infallible => handle_alloc_error(layout),
47
        }
48
0
    }
49
}
50
51
trait SizedTypeProperties: Sized {
52
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
53
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
54
}
55
56
impl<T> SizedTypeProperties for T {}
57
58
/// Primary hash function, used to select the initial bucket to probe from.
59
#[inline]
60
#[allow(clippy::cast_possible_truncation)]
61
1.58M
fn h1(hash: u64) -> usize {
62
1.58M
    // On 32-bit platforms we simply ignore the higher hash bits.
63
1.58M
    hash as usize
64
1.58M
}
65
66
/// Probe sequence based on triangular numbers, which is guaranteed (since our
67
/// table size is a power of two) to visit every group of elements exactly once.
68
///
69
/// A triangular probe has us jump by 1 more group every time. So first we
70
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
71
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
72
///
73
/// Proof that the probe will visit every group in the table:
74
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
75
#[derive(Clone)]
76
struct ProbeSeq {
77
    pos: usize,
78
    stride: usize,
79
}
80
81
impl ProbeSeq {
82
    #[inline]
83
8
    fn move_next(&mut self, bucket_mask: usize) {
84
8
        // We should have found an empty bucket by now and ended the probe.
85
8
        debug_assert!(
86
0
            self.stride <= bucket_mask,
87
0
            "Went past end of probe sequence"
88
        );
89
90
8
        self.stride += Group::WIDTH;
91
8
        self.pos += self.stride;
92
8
        self.pos &= bucket_mask;
93
8
    }
94
}
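
To make the guarantee in the comment above concrete, here is a minimal standalone sketch (assuming a Group::WIDTH of 16, as on x86_64 with SSE2, and a 256-bucket table; the starting position stands in for `h1(hash) & bucket_mask`). It replays the `move_next` update and checks that the triangular stride lands in every group exactly once before wrapping:

fn main() {
    const GROUP_WIDTH: usize = 16;            // assumed Group::WIDTH
    let buckets = 256usize;                   // any power of two
    let bucket_mask = buckets - 1;
    let groups = buckets / GROUP_WIDTH;

    let mut pos = 0x12345usize & bucket_mask; // stand-in for h1(hash) & bucket_mask
    let mut stride = 0usize;
    let mut seen = std::collections::HashSet::new();

    for _ in 0..groups {
        seen.insert(pos / GROUP_WIDTH);       // record which group this probe falls in
        stride += GROUP_WIDTH;                // same update as ProbeSeq::move_next
        pos = (pos + stride) & bucket_mask;
    }
    assert_eq!(seen.len(), groups);           // every group visited exactly once
    println!("visited all {groups} groups");
}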
95
96
/// Returns the number of buckets needed to hold the given number of items,
97
/// taking the maximum load factor into account.
98
///
99
/// Returns `None` if an overflow occurs.
100
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
101
#[cfg_attr(target_os = "emscripten", inline(never))]
102
#[cfg_attr(not(target_os = "emscripten"), inline)]
103
193k
fn capacity_to_buckets(cap: usize, table_layout: TableLayout) -> Option<usize> {
104
193k
    debug_assert_ne!(cap, 0);
105
106
    // For small tables we require at least 1 empty bucket so that lookups are
107
    // guaranteed to terminate if an element doesn't exist in the table.
108
193k
    if cap < 15 {
109
        // Consider a small TableLayout like { size: 1, ctrl_align: 16 } on a
110
        // platform with Group::WIDTH of 16 (like x86_64 with SSE2). For small
111
        // bucket sizes, this ends up wasting quite a few bytes just to pad to
112
        // the relatively larger ctrl_align:
113
        //
114
        // | capacity | buckets | bytes allocated | bytes per item |
115
        // | -------- | ------- | --------------- | -------------- |
116
        // |        3 |       4 |              36 | (Yikes!)  12.0 |
117
        // |        7 |       8 |              40 | (Poor)     5.7 |
118
        // |       14 |      16 |              48 |            3.4 |
119
        // |       28 |      32 |              80 |            3.3 |
120
        //
121
        // In general, buckets * table_layout.size >= table_layout.ctrl_align
122
        // must be true to avoid these edges. This is implemented by adjusting
123
        // the minimum capacity upwards for small items. This code only needs
124
        // to handle ctrl_align which are less than or equal to Group::WIDTH,
125
        // because valid layout sizes are always a multiple of the alignment,
126
        // so anything with alignment over the Group::WIDTH won't hit this edge
127
        // case.
128
129
        // This is brittle, e.g. if we ever add 32 byte groups, it will select
130
        // 3 regardless of the table_layout.size.
131
0
        let min_cap = match (Group::WIDTH, table_layout.size) {
132
0
            (16, 0..=1) => 14,
133
0
            (16, 2..=3) => 7,
134
0
            (8, 0..=1) => 7,
135
0
            _ => 3,
136
        };
137
0
        let cap = min_cap.max(cap);
138
0
        // We don't bother with a table size of 2 buckets since that can only
139
0
        // hold a single element. Instead, we skip directly to a 4 bucket table
140
0
        // which can hold 3 elements.
141
0
        return Some(if cap < 4 {
142
0
            4
143
0
        } else if cap < 8 {
144
0
            8
145
        } else {
146
0
            16
147
        });
148
193k
    }
149
150
    // Otherwise require 1/8 buckets to be empty (87.5% load)
151
    //
152
    // Be careful when modifying this, calculate_layout relies on the
153
    // overflow check here.
154
193k
    let adjusted_cap = cap.checked_mul(8)? / 7;
155
156
    // Any overflows will have been caught by the checked_mul. Also, any
157
    // rounding errors from the division above will be cleaned up by
158
    // next_power_of_two (which can't overflow because of the previous division).
159
193k
    Some(adjusted_cap.next_power_of_two())
160
193k
}
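
A small worked example of the arithmetic in the `cap >= 15` branch above (this just replays the `cap * 8 / 7` and `next_power_of_two` steps on a few sample capacities):

fn main() {
    for cap in [15usize, 100, 1000] {
        let adjusted = cap.checked_mul(8).unwrap() / 7;
        let buckets = adjusted.next_power_of_two();
        println!("capacity {cap:>4} -> adjusted {adjusted:>4} -> {buckets:>4} buckets");
    }
    // capacity   15 -> adjusted   17 ->   32 buckets
    // capacity  100 -> adjusted  114 ->  128 buckets
    // capacity 1000 -> adjusted 1142 -> 2048 buckets
}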
161
162
/// Returns the maximum effective capacity for the given bucket mask, taking
163
/// the maximum load factor into account.
164
#[inline]
165
387k
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
166
387k
    if bucket_mask < 8 {
167
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
168
        // Keep in mind that the bucket mask is one less than the bucket count.
169
0
        bucket_mask
170
    } else {
171
        // For larger tables we reserve 12.5% of the slots as empty.
172
387k
        ((bucket_mask + 1) / 8) * 7
173
    }
174
387k
}
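
Combining the two helpers above, a short sketch of the round-trip property for the large-table path: the usable capacity of the chosen power-of-two bucket count never falls below the requested capacity (the numbers only replay the formulas shown above):

fn main() {
    for cap in [15usize, 100, 1000] {
        let buckets = (cap * 8 / 7).next_power_of_two(); // as in capacity_to_buckets
        let bucket_mask = buckets - 1;
        let usable = ((bucket_mask + 1) / 8) * 7;        // as in bucket_mask_to_capacity
        assert!(usable >= cap);
        println!("cap {cap:>4}: {buckets:>4} buckets, usable capacity {usable:>4}");
    }
}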
175
176
/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T`
177
/// while keeping the rest of `calculate_layout_for` independent of `T`
178
#[derive(Copy, Clone)]
179
struct TableLayout {
180
    size: usize,
181
    ctrl_align: usize,
182
}
183
184
impl TableLayout {
185
    #[inline]
186
    const fn new<T>() -> Self {
187
        let layout = Layout::new::<T>();
188
        Self {
189
            size: layout.size(),
190
            ctrl_align: if layout.align() > Group::WIDTH {
191
                layout.align()
192
            } else {
193
                Group::WIDTH
194
            },
195
        }
196
    }
197
198
    #[inline]
199
387k
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
200
387k
        debug_assert!(buckets.is_power_of_two());
201
202
387k
        let TableLayout { size, ctrl_align } = self;
203
        // Manual layout calculation since Layout methods are not yet stable.
204
387k
        let ctrl_offset =
205
387k
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
206
387k
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
207
208
        // We need an additional check to ensure that the allocation doesn't
209
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
210
387k
        if len > isize::MAX as usize - (ctrl_align - 1) {
211
0
            return None;
212
387k
        }
213
387k
214
387k
        Some((
215
387k
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
216
387k
            ctrl_offset,
217
387k
        ))
218
387k
    }
219
}
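
For illustration, a standalone re-derivation of the layout formula above under assumed values (size_of::<T>() == 48 and an alignment no larger than an assumed Group::WIDTH of 16). This is not hashbrown's API, just the same arithmetic made concrete: bucket data comes first, then the control bytes, padded so the control area starts at a multiple of `ctrl_align`:

fn main() {
    let size = 48usize;        // assumed size_of::<T>()
    let ctrl_align = 16usize;  // max(align_of::<T>(), Group::WIDTH), assumed 16
    let group_width = 16usize; // assumed Group::WIDTH
    let buckets = 64usize;     // must be a power of two

    // Offset of the control bytes: bucket data rounded up to ctrl_align.
    let ctrl_offset = (size * buckets + ctrl_align - 1) & !(ctrl_align - 1);
    // Total allocation: data + one control byte per bucket + one trailing group.
    let len = ctrl_offset + buckets + group_width;

    println!("ctrl_offset = {ctrl_offset}, total allocation = {len} bytes");
    // ctrl_offset = 3072, total allocation = 3152 bytes
}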
220
221
/// A reference to an empty bucket into which an element can be inserted.
222
pub struct InsertSlot {
223
    index: usize,
224
}
225
226
/// A reference to a hash table bucket containing a `T`.
227
///
228
/// This is usually just a pointer to the element itself. However if the element
229
/// is a ZST, then we instead track the index of the element in the table so
230
/// that `erase` works properly.
231
pub struct Bucket<T> {
232
    // Actually this is a pointer one past the element (i.e. to the next element),
233
    // not to the element itself; this is needed to maintain the pointer arithmetic
234
    // invariants, since keeping a direct pointer to the element introduces difficulty.
235
    // Using `NonNull` for variance and niche layout
236
    ptr: NonNull<T>,
237
}
238
239
// This Send impl is needed for rayon support. This is safe since Bucket is
240
// never exposed in a public API.
241
unsafe impl<T> Send for Bucket<T> {}
242
243
impl<T> Clone for Bucket<T> {
244
    #[inline]
245
    fn clone(&self) -> Self {
246
        Self { ptr: self.ptr }
247
    }
248
}
249
250
impl<T> Bucket<T> {
251
    /// Creates a [`Bucket`] that contains a pointer to the data.
252
    /// The pointer calculation is performed by calculating the
253
    /// offset from the given `base` pointer (convenience for
254
    /// `base.as_ptr().sub(index)`).
255
    ///
256
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
257
    /// offset of `3 * size_of::<T>()` bytes.
258
    ///
259
    /// If the `T` is a ZST, then we instead track the index of the element
260
    /// in the table so that `erase` works properly (return
261
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
262
    ///
263
    /// # Safety
264
    ///
265
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
266
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
267
    /// rules of [`NonNull::new_unchecked`] function.
268
    ///
269
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
270
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
271
    /// logic of the work of this crate, the following rules are necessary and
272
    /// sufficient:
273
    ///
274
    /// * the `base` pointer must not be `dangling` and must point to the
275
    ///   end of the first `value element` from the `data part` of the table, i.e.
276
    ///   must be the pointer returned by [`RawTable::data_end`] or by
277
    ///   [`RawTableInner::data_end<T>`];
278
    ///
279
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
280
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
281
    ///   must be no greater than the number returned by the function
282
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
283
    ///
284
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
285
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
286
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
287
    /// must be no greater than the number returned by the function
288
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
289
    ///
290
    /// [`Bucket`]: crate::raw::Bucket
291
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
292
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
293
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
294
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
295
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
296
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
297
    #[inline]
298
1.65M
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
299
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
300
        // the data part of the table (we start counting from "0", so that
301
        // in the expression T[last], the "last" index actually is one less than the
302
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
303
        //
304
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
305
        //                   points here in the data part of the table
306
        //                   (to the start of T1)
307
        //                        |
308
        //                        |        `base: NonNull<T>` must point here
309
        //                        |         (to the end of T0 or to the start of C0)
310
        //                        v         v
311
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
312
        //                           ^
313
        //                           `from_base_index(base, 1)` returns a pointer
314
        //                           that points here in the data part of the table
315
        //                           (to the end of T1)
316
        //
317
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
318
        // or metadata for data.
319
1.65M
        let ptr = if T::IS_ZERO_SIZED {
320
            // won't overflow because index must be less than length (bucket_mask)
321
            // and bucket_mask is guaranteed to be less than `isize::MAX`
322
            // (see TableLayout::calculate_layout_for method)
323
0
            invalid_mut(index + 1)
324
        } else {
325
1.65M
            base.as_ptr().sub(index)
326
        };
327
1.65M
        Self {
328
1.65M
            ptr: NonNull::new_unchecked(ptr),
329
1.65M
        }
330
1.65M
    }
Per-instantiation counts for Bucket::<T>::from_base_index (lines 298-330; the ZST branch on line 323 is 0 in every instantiation):
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::from_base_index: 404k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::from_base_index: 600k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::from_base_index: 296k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::from_base_index: 237k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::from_base_index: 119k
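
To illustrate the convention documented above (and its inverse, `to_base_index`, which follows), here is a minimal standalone sketch that emulates the reverse layout with a plain Vec rather than hashbrown itself: `base` points one past logical element 0, `from_base_index(base, i)` corresponds to `base.sub(i)`, the element itself starts one `T` below that (what `as_ptr` returns), and `offset_from(base, ptr)` recovers the index:

fn main() {
    let n = 4usize;
    // Physical storage: logical slot i lives at physical index n - 1 - i,
    // mirroring "..., T2, T1, T0, | control bytes" in the diagram above.
    let storage: Vec<u32> = vec![300, 200, 100, 0]; // logical T0 = 0, T1 = 100, ...
    let data = storage.as_ptr();

    unsafe {
        let base = data.add(n); // end of T0 / start of the control area
        for i in 0..n {
            let bucket_ptr = base.sub(i);     // what from_base_index(base, i) stores
            let elem_ptr = bucket_ptr.sub(1); // what Bucket::as_ptr() returns
            let index = base.offset_from(bucket_ptr) as usize; // to_base_index
            assert_eq!(index, i);
            assert_eq!(*elem_ptr, (i as u32) * 100);
        }
    }
    println!("from_base_index/to_base_index round-trip checks passed");
}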
331
332
    /// Calculates the index of a [`Bucket`] as the distance between two pointers
333
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
334
    /// The returned value is in units of T: the distance in bytes divided by
335
    /// [`core::mem::size_of::<T>()`].
336
    ///
337
    /// If the `T` is a ZST, then we return the index of the element in
338
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
339
    ///
340
    /// This function is the inverse of [`from_base_index`].
341
    ///
342
    /// # Safety
343
    ///
344
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
345
    /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
346
    ///
347
    /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
348
    /// method, as well as for the correct logic of the work of this crate, the
349
    /// following rules are necessary and sufficient:
350
    ///
351
    /// * the pointer contained in `base` must not be `dangling` and must point to the
352
    ///   end of the first `element` from the `data part` of the table, i.e.
353
    ///   must be a pointer returned by [`RawTable::data_end`] or by
354
    ///   [`RawTableInner::data_end<T>`];
355
    ///
356
    /// * `self` also must not contain a dangling pointer;
357
    ///
358
    /// * both `self` and `base` must be created from the same [`RawTable`]
359
    ///   (or [`RawTableInner`]).
360
    ///
361
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
362
    ///
363
    /// [`Bucket`]: crate::raw::Bucket
364
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
365
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
366
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
367
    /// [`RawTable`]: crate::raw::RawTable
368
    /// [`RawTableInner`]: RawTableInner
369
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
370
    #[inline]
371
337k
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
372
337k
        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
373
337k
        // `element` in the data part of the table (we start counting from "0", so
374
337k
        // that in the expression T[last], the "last" index actually is one less than the
375
337k
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
376
337k
        // For example, for the 5th element in the table the calculation is performed like this:
377
337k
        //
378
337k
        //                        mem::size_of::<T>()
379
337k
        //                          |
380
337k
        //                          |         `self = from_base_index(base, 5)` that returns pointer
381
337k
        //                          |         that points here in the data part of the table
382
337k
        //                          |         (to the end of T5)
383
337k
        //                          |           |                    `base: NonNull<T>` must point here
384
337k
        //                          v           |                    (to the end of T0 or to the start of C0)
385
337k
        //                        /???\         v                      v
386
337k
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
387
337k
        //                                      \__________  __________/
388
337k
        //                                                 \/
389
337k
        //                                     `bucket.to_base_index(base)` = 5
390
337k
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
391
337k
        //
392
337k
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
393
337k
        if T::IS_ZERO_SIZED {
394
            // this can not be UB
395
0
            self.ptr.as_ptr() as usize - 1
396
        } else {
397
337k
            offset_from(base.as_ptr(), self.ptr.as_ptr())
398
        }
399
337k
    }
Per-instantiation counts for Bucket::<T>::to_base_index (lines 371-399; the ZST branch on line 395 is 0 in every instantiation):
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::to_base_index: 23.4k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::to_base_index: 179k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::to_base_index: 113k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::to_base_index: 15.9k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::to_base_index: 5.27k
400
401
    /// Acquires the underlying raw pointer `*mut T` to `data`.
402
    ///
403
    /// # Note
404
    ///
405
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause the
406
    /// destructor of `T` to be called (for example [`<*mut T>::drop_in_place`]), because
407
    /// for properly dropping the data we also need to clear `data` control bytes. If we
408
    /// drop the data but do not clear the `data control byte`, it leads to a double drop when
409
    /// [`RawTable`] goes out of scope.
410
    ///
411
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
412
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
413
    /// will not re-evaluate where the new value should go, meaning the value may become
414
    /// "lost" if their location does not reflect their state.
415
    ///
416
    /// [`RawTable`]: crate::raw::RawTable
417
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
418
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
419
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
420
    #[inline]
421
1.63M
    pub fn as_ptr(&self) -> *mut T {
422
1.63M
        if T::IS_ZERO_SIZED {
423
            // Just return an arbitrary ZST pointer which is properly aligned
424
            // invalid pointer is good enough for ZST
425
0
            invalid_mut(mem::align_of::<T>())
426
        } else {
427
1.63M
            unsafe { self.ptr.as_ptr().sub(1) }
428
        }
429
1.63M
    }
Per-instantiation counts for Bucket::<T>::as_ptr (lines 421-429; the ZST branch on line 425 is 0 in every instantiation):
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::as_ptr: 406k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::as_ptr: 653k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::as_ptr: 271k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::as_ptr: 211k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::as_ptr: 89.7k
430
431
    /// Acquires the underlying non-null pointer `*mut T` to `data`.
432
    #[inline]
433
    fn as_non_null(&self) -> NonNull<T> {
434
        // SAFETY: `self.ptr` is already a `NonNull`
435
        unsafe { NonNull::new_unchecked(self.as_ptr()) }
436
    }
437
438
    /// Creates a new [`Bucket`] that is offset from `self` by the given
439
    /// `offset`. The pointer calculation is performed by calculating the
440
    /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
441
    /// This function is used for iterators.
442
    ///
443
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
444
    /// offset of `3 * size_of::<T>()` bytes.
445
    ///
446
    /// # Safety
447
    ///
448
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
449
    /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
450
    /// rules of [`NonNull::new_unchecked`] function.
451
    ///
452
    /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
453
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
454
    /// logic of the work of this crate, the following rules are necessary and
455
    /// sufficient:
456
    ///
457
    /// * the pointer contained in `self` must not be `dangling`;
458
    ///
459
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
460
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
461
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
462
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
463
    ///
464
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
465
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
466
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
467
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
468
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
469
    ///
470
    /// [`Bucket`]: crate::raw::Bucket
471
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
472
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
473
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
474
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
475
    #[inline]
476
1.30M
    unsafe fn next_n(&self, offset: usize) -> Self {
477
1.30M
        let ptr = if T::IS_ZERO_SIZED {
478
            // invalid pointer is good enough for ZST
479
0
            invalid_mut(self.ptr.as_ptr() as usize + offset)
480
        } else {
481
1.30M
            self.ptr.as_ptr().sub(offset)
482
        };
483
1.30M
        Self {
484
1.30M
            ptr: NonNull::new_unchecked(ptr),
485
1.30M
        }
486
1.30M
    }
Per-instantiation counts for Bucket::<T>::next_n (lines 476-486; the ZST branch on line 479 is 0 in every instantiation):
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::next_n: 578k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::next_n: 442k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::next_n: 42.6k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::next_n: 219k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::next_n: 19.9k
487
488
    /// Executes the destructor (if any) of the pointed-to `data`.
489
    ///
490
    /// # Safety
491
    ///
492
    /// See [`ptr::drop_in_place`] for safety concerns.
493
    ///
494
    /// You should use [`RawTable::erase`] instead of this function,
495
    /// or be careful with calling this function directly, because for
496
    /// properly dropping the data we also need to clear the `data` control bytes.
497
    /// If we drop the data but do not erase the `data control byte`, it leads to
498
    /// double drop when [`RawTable`] goes out of scope.
499
    ///
500
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
501
    /// [`RawTable`]: crate::raw::RawTable
502
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
503
    #[cfg_attr(feature = "inline-more", inline)]
504
    pub(crate) unsafe fn drop(&self) {
505
        self.as_ptr().drop_in_place();
506
    }
507
508
    /// Reads the `value` from `self` without moving it. This leaves the
509
    /// memory in `self` unchanged.
510
    ///
511
    /// # Safety
512
    ///
513
    /// See [`ptr::read`] for safety concerns.
514
    ///
515
    /// You should use [`RawTable::remove`] instead of this function,
516
    /// or be careful with calling this function directly, because compiler
517
    /// calls its destructor when the read `value` goes out of scope. It
518
    /// can cause double dropping when [`RawTable`] goes out of scope,
519
    /// because of not erased `data control byte`.
520
    ///
521
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
522
    /// [`RawTable`]: crate::raw::RawTable
523
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
524
    #[inline]
525
337k
    pub(crate) unsafe fn read(&self) -> T {
526
337k
        self.as_ptr().read()
527
337k
    }
Per-instantiation counts for Bucket::<T>::read (lines 525-527):
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::read: 52.3k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::read: 182k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::read: 70.9k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::read: 20.4k
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::read: 11.2k
528
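The double-drop hazard described in the `read` documentation above can be illustrated without hashbrown's internals. A minimal sketch, assuming nothing beyond `core::ptr::read` and `ManuallyDrop` from the standard library: once a value has been read out of a slot by pointer, the slot must no longer be treated as owning it.

use core::mem::ManuallyDrop;
use core::ptr;

fn main() {
    // Stand-in for a table slot; `ManuallyDrop` plays the role of the cleared
    // control byte by preventing the slot itself from ever being dropped.
    let slot = ManuallyDrop::new(String::from("slot value"));

    // SAFETY: the slot holds a valid, initialized String, and we never touch
    // it again after this bitwise copy takes over ownership.
    let taken: String = unsafe { ptr::read(&*slot) };

    println!("took ownership of: {taken}");
    // `taken` is dropped here exactly once; dropping `slot` as well would be
    // the double free that erasing the control byte prevents in `RawTable`.
}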
529
    /// Overwrites a memory location with the given `value` without reading
530
    /// or dropping the old value (like the [`ptr::write`] function).
531
    ///
532
    /// # Safety
533
    ///
534
    /// See [`ptr::write`] for safety concerns.
535
    ///
536
    /// # Note
537
    ///
538
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
539
    /// those for the old `T` value, as the map will not re-evaluate where the new
540
    /// value should go, meaning the value may become "lost" if its location
541
    /// does not reflect its state.
542
    ///
543
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
544
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
545
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
546
    #[inline]
547
337k
    pub(crate) unsafe fn write(&self, val: T) {
548
337k
        self.as_ptr().write(val);
549
337k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::write
Line
Count
Source
547
52.3k
    pub(crate) unsafe fn write(&self, val: T) {
548
52.3k
        self.as_ptr().write(val);
549
52.3k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::write
Line
Count
Source
547
182k
    pub(crate) unsafe fn write(&self, val: T) {
548
182k
        self.as_ptr().write(val);
549
182k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::write
Line
Count
Source
547
70.9k
    pub(crate) unsafe fn write(&self, val: T) {
548
70.9k
        self.as_ptr().write(val);
549
70.9k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::write
Line
Count
Source
547
20.4k
    pub(crate) unsafe fn write(&self, val: T) {
548
20.4k
        self.as_ptr().write(val);
549
20.4k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::write
Line
Count
Source
547
11.2k
    pub(crate) unsafe fn write(&self, val: T) {
548
11.2k
        self.as_ptr().write(val);
549
11.2k
    }
550
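The note above about values becoming "lost" is easiest to see from the public map API. A self-contained sketch using `std::collections::HashMap` (which is built on hashbrown) and a deliberately ill-behaved key type, hypothetical and for illustration only, rather than `Bucket::write` itself:

use std::cell::Cell;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::rc::Rc;

// A key whose hash can change after insertion, violating the invariant above.
#[derive(PartialEq, Eq)]
struct Key(Rc<Cell<u32>>);

impl Hash for Key {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.get().hash(state);
    }
}

fn main() {
    let shared = Rc::new(Cell::new(1));
    let mut map = HashMap::new();
    map.insert(Key(Rc::clone(&shared)), "value");

    shared.set(2); // the stored key now hashes differently

    // The entry still occupies its original bucket, but a lookup probing from
    // the new hash almost always starts elsewhere and misses it.
    println!("found after mutation: {}", map.get(&Key(Rc::clone(&shared))).is_some());
    println!("len is still {}", map.len());
}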
551
    /// Returns a shared immutable reference to the `value`.
552
    ///
553
    /// # Safety
554
    ///
555
    /// See [`NonNull::as_ref`] for safety concerns.
556
    ///
557
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
558
    #[inline]
559
565k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
565k
        &*self.as_ptr()
561
565k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::as_ref
Line
Count
Source
559
156k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
156k
        &*self.as_ptr()
561
156k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::as_ref
Line
Count
Source
559
190k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
190k
        &*self.as_ptr()
561
190k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::as_ref
Line
Count
Source
559
93.5k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
93.5k
        &*self.as_ptr()
561
93.5k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::as_ref
Line
Count
Source
559
89.5k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
89.5k
        &*self.as_ptr()
561
89.5k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::as_ref
Line
Count
Source
559
35.0k
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
560
35.0k
        &*self.as_ptr()
561
35.0k
    }
562
563
    /// Returns a unique mutable reference to the `value`.
564
    ///
565
    /// # Safety
566
    ///
567
    /// See [`NonNull::as_mut`] for safety concerns.
568
    ///
569
    /// # Note
570
    ///
571
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
572
    /// those for the old `T` value, as the map will not re-evaluate where the new
573
    /// value should go, meaning the value may become "lost" if its location
574
    /// does not reflect its state.
575
    ///
576
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
577
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
578
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
579
    #[inline]
580
393k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
393k
        &mut *self.as_ptr()
582
393k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::as_mut
Line
Count
Source
580
144k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
144k
        &mut *self.as_ptr()
582
144k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::as_mut
Line
Count
Source
580
99.0k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
99.0k
        &mut *self.as_ptr()
582
99.0k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::as_mut
Line
Count
Source
580
36.1k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
36.1k
        &mut *self.as_ptr()
582
36.1k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::as_mut
Line
Count
Source
580
81.3k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
81.3k
        &mut *self.as_ptr()
582
81.3k
    }
<hashbrown::raw::Bucket<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::as_mut
Line
Count
Source
580
32.1k
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
581
32.1k
        &mut *self.as_ptr()
582
32.1k
    }
583
}
584
585
/// A raw hash table with an unsafe API.
586
pub struct RawTable<T, A: Allocator = Global> {
587
    table: RawTableInner,
588
    alloc: A,
589
    // Tell dropck that we own instances of T.
590
    marker: PhantomData<T>,
591
}
592
593
/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
594
/// of how many different key-value types are used.
595
struct RawTableInner {
596
    // Mask to get an index from a hash value. The value is one less than the
597
    // number of buckets in the table.
598
    bucket_mask: usize,
599
600
    // [Padding], T_n, ..., T1, T0, C0, C1, ...
601
    //                              ^ points here
602
    ctrl: NonNull<u8>,
603
604
    // Number of elements that can be inserted before we need to grow the table
605
    growth_left: usize,
606
607
    // Number of elements in the table, only really used by len()
608
    items: usize,
609
}
610
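The `bucket_mask` comment above relies on the bucket count always being a power of two, so masking and taking a modulus agree. A small stand-alone check of that identity (plain Rust, no hashbrown types):

fn main() {
    let buckets: usize = 8; // bucket counts in RawTable are always powers of two
    let bucket_mask = buckets - 1;
    for hash in [0u64, 7, 8, 13, 0xDEAD_BEEF] {
        // `hash & (buckets - 1)` picks the same bucket as `hash % buckets`.
        assert_eq!((hash as usize) & bucket_mask, (hash as usize) % buckets);
    }
    println!("mask and modulo agree for a power-of-two bucket count");
}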
611
impl<T> RawTable<T, Global> {
612
    /// Creates a new empty hash table without allocating any memory.
613
    ///
614
    /// In effect this returns a table with exactly 1 bucket. However we can
615
    /// leave the data pointer dangling since that bucket is never written to
616
    /// due to our load factor forcing us to always have at least 1 free bucket.
617
    #[inline]
618
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
619
    pub const fn new() -> Self {
620
        Self {
621
            table: RawTableInner::NEW,
622
            alloc: Global,
623
            marker: PhantomData,
624
        }
625
    }
626
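As the comment on `new` explains, an unallocated table still reports one never-used bucket internally while exposing a capacity of zero. A usage-level sketch through the public wrapper type (std's `HashMap`, built on hashbrown) rather than `RawTable::new` directly:

use std::collections::HashMap;

fn main() {
    // No heap allocation happens until the first insert forces a resize.
    let map: HashMap<u32, u32> = HashMap::new();
    assert_eq!(map.capacity(), 0);
    assert!(map.is_empty());
    println!("empty map created without allocating");
}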
627
    /// Allocates a new hash table with at least enough capacity for inserting
628
    /// the given number of elements without reallocating.
629
193k
    pub fn with_capacity(capacity: usize) -> Self {
630
193k
        Self::with_capacity_in(capacity, Global)
631
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::with_capacity
Line
Count
Source
629
38.7k
    pub fn with_capacity(capacity: usize) -> Self {
630
38.7k
        Self::with_capacity_in(capacity, Global)
631
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::with_capacity
Line
Count
Source
629
38.7k
    pub fn with_capacity(capacity: usize) -> Self {
630
38.7k
        Self::with_capacity_in(capacity, Global)
631
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::with_capacity
Line
Count
Source
629
38.7k
    pub fn with_capacity(capacity: usize) -> Self {
630
38.7k
        Self::with_capacity_in(capacity, Global)
631
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::with_capacity
Line
Count
Source
629
38.7k
    pub fn with_capacity(capacity: usize) -> Self {
630
38.7k
        Self::with_capacity_in(capacity, Global)
631
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::with_capacity
Line
Count
Source
629
38.7k
    pub fn with_capacity(capacity: usize) -> Self {
630
38.7k
        Self::with_capacity_in(capacity, Global)
631
38.7k
    }
632
}
633
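A usage-level sketch of the `with_capacity` guarantee above, again via the public `HashMap` wrapper rather than `RawTable` itself: reserving room for `n` elements means the first `n` inserts never reallocate.

use std::collections::HashMap;

fn main() {
    let n = 100;
    let mut map: HashMap<u32, u32> = HashMap::with_capacity(n);
    let initial_capacity = map.capacity();
    assert!(initial_capacity >= n);

    for i in 0..n as u32 {
        map.insert(i, i * 2);
    }

    // The table never grew while absorbing the reserved number of elements.
    assert_eq!(map.capacity(), initial_capacity);
    println!("inserted {n} items without reallocating");
}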
634
impl<T, A: Allocator> RawTable<T, A> {
635
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
636
637
    /// Creates a new empty hash table without allocating any memory, using the
638
    /// given allocator.
639
    ///
640
    /// In effect this returns a table with exactly 1 bucket. However we can
641
    /// leave the data pointer dangling since that bucket is never written to
642
    /// due to our load factor forcing us to always have at least 1 free bucket.
643
    #[inline]
644
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
645
    pub const fn new_in(alloc: A) -> Self {
646
        Self {
647
            table: RawTableInner::NEW,
648
            alloc,
649
            marker: PhantomData,
650
        }
651
    }
652
653
    /// Allocates a new hash table with the given number of buckets.
654
    ///
655
    /// The control bytes are left uninitialized.
656
    #[cfg_attr(feature = "inline-more", inline)]
657
    unsafe fn new_uninitialized(
658
        alloc: A,
659
        buckets: usize,
660
        fallibility: Fallibility,
661
    ) -> Result<Self, TryReserveError> {
662
        debug_assert!(buckets.is_power_of_two());
663
664
        Ok(Self {
665
            table: RawTableInner::new_uninitialized(
666
                &alloc,
667
                Self::TABLE_LAYOUT,
668
                buckets,
669
                fallibility,
670
            )?,
671
            alloc,
672
            marker: PhantomData,
673
        })
674
    }
675
676
    /// Allocates a new hash table using the given allocator, with at least enough capacity for
677
    /// inserting the given number of elements without reallocating.
678
193k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
193k
        Self {
680
193k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
193k
            alloc,
682
193k
            marker: PhantomData,
683
193k
        }
684
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::with_capacity_in
Line
Count
Source
678
38.7k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
38.7k
        Self {
680
38.7k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
38.7k
            alloc,
682
38.7k
            marker: PhantomData,
683
38.7k
        }
684
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::with_capacity_in
Line
Count
Source
678
38.7k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
38.7k
        Self {
680
38.7k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
38.7k
            alloc,
682
38.7k
            marker: PhantomData,
683
38.7k
        }
684
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::with_capacity_in
Line
Count
Source
678
38.7k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
38.7k
        Self {
680
38.7k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
38.7k
            alloc,
682
38.7k
            marker: PhantomData,
683
38.7k
        }
684
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::with_capacity_in
Line
Count
Source
678
38.7k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
38.7k
        Self {
680
38.7k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
38.7k
            alloc,
682
38.7k
            marker: PhantomData,
683
38.7k
        }
684
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::with_capacity_in
Line
Count
Source
678
38.7k
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
679
38.7k
        Self {
680
38.7k
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
681
38.7k
            alloc,
682
38.7k
            marker: PhantomData,
683
38.7k
        }
684
38.7k
    }
685
686
    /// Returns a reference to the underlying allocator.
687
    #[inline]
688
    pub fn allocator(&self) -> &A {
689
        &self.alloc
690
    }
691
692
    /// Returns a pointer to one past the last `data` element in the table, as viewed
693
    /// from the start of the allocation.
694
    ///
695
    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
696
    /// otherwise using it may result in [`undefined behavior`].
697
    ///
698
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
699
    #[inline]
700
1.80M
    pub fn data_end(&self) -> NonNull<T> {
701
1.80M
        //                        `self.table.ctrl.cast()` returns pointer that
702
1.80M
        //                        points here (to the end of `T0`)
703
1.80M
        //                          ∨
704
1.80M
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
1.80M
        //                           \________  ________/
706
1.80M
        //                                    \/
707
1.80M
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
1.80M
        //
709
1.80M
        // where: T0...T_n  - our stored data;
710
1.80M
        //        CT0...CT_n - control bytes or metadata for `data`.
711
1.80M
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
1.80M
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
1.80M
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
1.80M
        //                        `RawTableInner::set_ctrl` function.
715
1.80M
        //
716
1.80M
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
1.80M
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
1.80M
        self.table.ctrl.cast()
719
1.80M
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::data_end
Line
Count
Source
700
388k
    pub fn data_end(&self) -> NonNull<T> {
701
388k
        //                        `self.table.ctrl.cast()` returns pointer that
702
388k
        //                        points here (to the end of `T0`)
703
388k
        //                          ∨
704
388k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
388k
        //                           \________  ________/
706
388k
        //                                    \/
707
388k
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
388k
        //
709
388k
        // where: T0...T_n  - our stored data;
710
388k
        //        CT0...CT_n - control bytes or metadata for `data`.
711
388k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
388k
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
388k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
388k
        //                        `RawTableInner::set_ctrl` function.
715
388k
        //
716
388k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
388k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
388k
        self.table.ctrl.cast()
719
388k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::data_end
Line
Count
Source
700
740k
    pub fn data_end(&self) -> NonNull<T> {
701
740k
        //                        `self.table.ctrl.cast()` returns pointer that
702
740k
        //                        points here (to the end of `T0`)
703
740k
        //                          ∨
704
740k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
740k
        //                           \________  ________/
706
740k
        //                                    \/
707
740k
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
740k
        //
709
740k
        // where: T0...T_n  - our stored data;
710
740k
        //        CT0...CT_n - control bytes or metadata for `data`.
711
740k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
740k
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
740k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
740k
        //                        `RawTableInner::set_ctrl` function.
715
740k
        //
716
740k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
740k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
740k
        self.table.ctrl.cast()
719
740k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::data_end
Line
Count
Source
700
370k
    pub fn data_end(&self) -> NonNull<T> {
701
370k
        //                        `self.table.ctrl.cast()` returns pointer that
702
370k
        //                        points here (to the end of `T0`)
703
370k
        //                          ∨
704
370k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
370k
        //                           \________  ________/
706
370k
        //                                    \/
707
370k
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
370k
        //
709
370k
        // where: T0...T_n  - our stored data;
710
370k
        //        CT0...CT_n - control bytes or metadata for `data`.
711
370k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
370k
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
370k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
370k
        //                        `RawTableInner::set_ctrl` function.
715
370k
        //
716
370k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
370k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
370k
        self.table.ctrl.cast()
719
370k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::data_end
Line
Count
Source
700
215k
    pub fn data_end(&self) -> NonNull<T> {
701
215k
        //                        `self.table.ctrl.cast()` returns pointer that
702
215k
        //                        points here (to the end of `T0`)
703
215k
        //                          ∨
704
215k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
215k
        //                           \________  ________/
706
215k
        //                                    \/
707
215k
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
215k
        //
709
215k
        // where: T0...T_n  - our stored data;
710
215k
        //        CT0...CT_n - control bytes or metadata for `data`.
711
215k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
215k
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
215k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
215k
        //                        `RawTableInner::set_ctrl` function.
715
215k
        //
716
215k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
215k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
215k
        self.table.ctrl.cast()
719
215k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::data_end
Line
Count
Source
700
86.3k
    pub fn data_end(&self) -> NonNull<T> {
701
86.3k
        //                        `self.table.ctrl.cast()` returns pointer that
702
86.3k
        //                        points here (to the end of `T0`)
703
86.3k
        //                          ∨
704
86.3k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
705
86.3k
        //                           \________  ________/
706
86.3k
        //                                    \/
707
86.3k
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
708
86.3k
        //
709
86.3k
        // where: T0...T_n  - our stored data;
710
86.3k
        //        CT0...CT_n - control bytes or metadata for `data`.
711
86.3k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
712
86.3k
        //                        with loading `Group` bytes from the heap works properly, even if the result
713
86.3k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
714
86.3k
        //                        `RawTableInner::set_ctrl` function.
715
86.3k
        //
716
86.3k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
717
86.3k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
718
86.3k
        self.table.ctrl.cast()
719
86.3k
    }
720
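The layout diagram repeated above places the data elements immediately below the control bytes, so element `i` is reached by stepping backwards from `data_end`. A rough illustration with a plain slice standing in for the data part (not hashbrown's actual allocation):

fn main() {
    // Pretend these are T3, T2, T1, T0 laid out in front of the control bytes.
    let data = [30u32, 20, 10, 0];
    // One past the last element plays the role of `data_end()`.
    let end = unsafe { data.as_ptr().add(data.len()) };

    for index in 0..data.len() {
        // Mirrors `Bucket::from_base_index(end, index).as_ptr()` for sized T:
        // bucket `index` starts `index + 1` elements below the end pointer.
        let elem = unsafe { end.sub(index + 1) };
        println!("bucket {index} holds {}", unsafe { *elem });
    }
}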
721
    /// Returns a pointer to the start of the data table.
722
    #[inline]
723
    #[cfg(feature = "nightly")]
724
    pub unsafe fn data_start(&self) -> NonNull<T> {
725
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
726
    }
727
728
    /// Returns the total amount of memory allocated internally by the hash
729
    /// table, in bytes.
730
    ///
731
    /// The returned number is informational only. It is intended to be
732
    /// primarily used for memory profiling.
733
    #[inline]
734
    pub fn allocation_size(&self) -> usize {
735
        // SAFETY: We use the same `table_layout` that was used to allocate
736
        // this table.
737
        unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) }
738
    }
739
740
    /// Returns the index of a bucket from a `Bucket`.
741
    #[inline]
742
337k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
337k
        bucket.to_base_index(self.data_end())
744
337k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::bucket_index
Line
Count
Source
742
23.4k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
23.4k
        bucket.to_base_index(self.data_end())
744
23.4k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::bucket_index
Line
Count
Source
742
179k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
179k
        bucket.to_base_index(self.data_end())
744
179k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::bucket_index
Line
Count
Source
742
113k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
113k
        bucket.to_base_index(self.data_end())
744
113k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::bucket_index
Line
Count
Source
742
15.9k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
15.9k
        bucket.to_base_index(self.data_end())
744
15.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::bucket_index
Line
Count
Source
742
5.27k
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
743
5.27k
        bucket.to_base_index(self.data_end())
744
5.27k
    }
745
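`bucket_index` is the inverse of `bucket`: one converts an index into an end-anchored pointer, the other recovers the index with a pointer subtraction (`offset_from`). A minimal round-trip sketch over a plain slice, under the same end-anchored convention:

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
    let end = unsafe { data.as_ptr().add(data.len()) }; // stands in for data_end()

    for index in 0..data.len() {
        // Like `Bucket::from_base_index`: the bucket pointer sits `index`
        // elements below the end pointer (pointing one past its element).
        let bucket_end = unsafe { end.sub(index) };
        // Like `bucket_index` / `to_base_index`: recover the index again.
        let recovered = unsafe { end.offset_from(bucket_end) } as usize;
        assert_eq!(recovered, index);
    }
    println!("index -> pointer -> index round-trips for all {} buckets", data.len());
}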
746
    /// Returns a pointer to an element in the table.
747
    ///
748
    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
749
    /// otherwise using it may result in [`undefined behavior`].
750
    ///
751
    /// # Safety
752
    ///
753
    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
754
    /// following safety rules:
755
    ///
756
    /// * The table must already be allocated;
757
    ///
758
    /// * The `index` must be strictly less than the number returned by the [`RawTable::buckets`]
759
    ///   function, i.e. `(index + 1) <= self.buckets()`.
760
    ///
761
    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
762
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
763
    ///
764
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
765
    /// be strictly less than the number returned by the [`RawTable::buckets`] function, i.e.
766
    /// `(index + 1) <= self.buckets()`.
767
    ///
768
    /// [`RawTable::buckets`]: RawTable::buckets
769
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
770
    #[inline]
771
1.46M
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
1.46M
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
1.46M
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
1.46M
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
1.46M
        //
776
1.46M
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
1.46M
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
1.46M
        //                  |
779
1.46M
        //                  |               `base = self.data_end()` points here
780
1.46M
        //                  |               (to the start of CT0 or to the end of T0)
781
1.46M
        //                  v                 v
782
1.46M
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
1.46M
        //                     ^                                              \__________  __________/
784
1.46M
        //        `table.bucket(3)` returns a pointer that points                        \/
785
1.46M
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
1.46M
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
1.46M
        //
788
1.46M
        // where: T0...T_n  - our stored data;
789
1.46M
        //        CT0...CT_n - control bytes or metadata for `data`;
790
1.46M
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
1.46M
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
1.46M
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
1.46M
        //
794
1.46M
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
1.46M
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
1.46M
        debug_assert_ne!(self.table.bucket_mask, 0);
797
1.46M
        debug_assert!(index < self.buckets());
798
1.46M
        Bucket::from_base_index(self.data_end(), index)
799
1.46M
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::bucket
Line
Count
Source
771
365k
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
365k
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
365k
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
365k
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
365k
        //
776
365k
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
365k
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
365k
        //                  |
779
365k
        //                  |               `base = self.data_end()` points here
780
365k
        //                  |               (to the start of CT0 or to the end of T0)
781
365k
        //                  v                 v
782
365k
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
365k
        //                     ^                                              \__________  __________/
784
365k
        //        `table.bucket(3)` returns a pointer that points                        \/
785
365k
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
365k
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
365k
        //
788
365k
        // where: T0...T_n  - our stored data;
789
365k
        //        CT0...CT_n - control bytes or metadata for `data`;
790
365k
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
365k
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
365k
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
365k
        //
794
365k
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
365k
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
365k
        debug_assert_ne!(self.table.bucket_mask, 0);
797
365k
        debug_assert!(index < self.buckets());
798
365k
        Bucket::from_base_index(self.data_end(), index)
799
365k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::bucket
Line
Count
Source
771
561k
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
561k
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
561k
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
561k
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
561k
        //
776
561k
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
561k
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
561k
        //                  |
779
561k
        //                  |               `base = self.data_end()` points here
780
561k
        //                  |               (to the start of CT0 or to the end of T0)
781
561k
        //                  v                 v
782
561k
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
561k
        //                     ^                                              \__________  __________/
784
561k
        //        `table.bucket(3)` returns a pointer that points                        \/
785
561k
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
561k
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
561k
        //
788
561k
        // where: T0...T_n  - our stored data;
789
561k
        //        CT0...CT_n - control bytes or metadata for `data`;
790
561k
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
561k
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
561k
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
561k
        //
794
561k
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
561k
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
561k
        debug_assert_ne!(self.table.bucket_mask, 0);
797
561k
        debug_assert!(index < self.buckets());
798
561k
        Bucket::from_base_index(self.data_end(), index)
799
561k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::bucket
Line
Count
Source
771
257k
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
257k
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
257k
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
257k
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
257k
        //
776
257k
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
257k
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
257k
        //                  |
779
257k
        //                  |               `base = self.data_end()` points here
780
257k
        //                  |               (to the start of CT0 or to the end of T0)
781
257k
        //                  v                 v
782
257k
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
257k
        //                     ^                                              \__________  __________/
784
257k
        //        `table.bucket(3)` returns a pointer that points                        \/
785
257k
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
257k
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
257k
        //
788
257k
        // where: T0...T_n  - our stored data;
789
257k
        //        CT0...CT_n - control bytes or metadata for `data`;
790
257k
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
257k
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
257k
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
257k
        //
794
257k
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
257k
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
257k
        debug_assert_ne!(self.table.bucket_mask, 0);
797
257k
        debug_assert!(index < self.buckets());
798
257k
        Bucket::from_base_index(self.data_end(), index)
799
257k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::bucket
Line
Count
Source
771
199k
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
199k
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
199k
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
199k
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
199k
        //
776
199k
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
199k
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
199k
        //                  |
779
199k
        //                  |               `base = self.data_end()` points here
780
199k
        //                  |               (to the start of CT0 or to the end of T0)
781
199k
        //                  v                 v
782
199k
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
199k
        //                     ^                                              \__________  __________/
784
199k
        //        `table.bucket(3)` returns a pointer that points                        \/
785
199k
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
199k
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
199k
        //
788
199k
        // where: T0...T_n  - our stored data;
789
199k
        //        CT0...CT_n - control bytes or metadata for `data`;
790
199k
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
199k
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
199k
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
199k
        //
794
199k
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
199k
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
199k
        debug_assert_ne!(self.table.bucket_mask, 0);
797
199k
        debug_assert!(index < self.buckets());
798
199k
        Bucket::from_base_index(self.data_end(), index)
799
199k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::bucket
Line
Count
Source
771
81.0k
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
772
81.0k
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
773
81.0k
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
774
81.0k
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
775
81.0k
        //
776
81.0k
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
777
81.0k
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
778
81.0k
        //                  |
779
81.0k
        //                  |               `base = self.data_end()` points here
780
81.0k
        //                  |               (to the start of CT0 or to the end of T0)
781
81.0k
        //                  v                 v
782
81.0k
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
783
81.0k
        //                     ^                                              \__________  __________/
784
81.0k
        //        `table.bucket(3)` returns a pointer that points                        \/
785
81.0k
        //         here in the `data` part of the `RawTable` (to              additional control bytes
786
81.0k
        //         the end of T3)                                              `m = Group::WIDTH - 1`
787
81.0k
        //
788
81.0k
        // where: T0...T_n  - our stored data;
789
81.0k
        //        CT0...CT_n - control bytes or metadata for `data`;
790
81.0k
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
791
81.0k
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
792
81.0k
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
793
81.0k
        //
794
81.0k
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
795
81.0k
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
796
81.0k
        debug_assert_ne!(self.table.bucket_mask, 0);
797
81.0k
        debug_assert!(index < self.buckets());
798
81.0k
        Bucket::from_base_index(self.data_end(), index)
799
81.0k
    }
800
801
    /// Erases an element from the table without dropping it.
802
    #[cfg_attr(feature = "inline-more", inline)]
803
168k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
168k
        let index = self.bucket_index(item);
805
168k
        self.table.erase(index);
806
168k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::erase_no_drop
Line
Count
Source
803
11.7k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
11.7k
        let index = self.bucket_index(item);
805
11.7k
        self.table.erase(index);
806
11.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::erase_no_drop
Line
Count
Source
803
89.6k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
89.6k
        let index = self.bucket_index(item);
805
89.6k
        self.table.erase(index);
806
89.6k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::erase_no_drop
Line
Count
Source
803
56.7k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
56.7k
        let index = self.bucket_index(item);
805
56.7k
        self.table.erase(index);
806
56.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::erase_no_drop
Line
Count
Source
803
7.97k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
7.97k
        let index = self.bucket_index(item);
805
7.97k
        self.table.erase(index);
806
7.97k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::erase_no_drop
Line
Count
Source
803
2.63k
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
804
2.63k
        let index = self.bucket_index(item);
805
2.63k
        self.table.erase(index);
806
2.63k
    }
807
808
    /// Erases an element from the table, dropping it in place.
809
    #[cfg_attr(feature = "inline-more", inline)]
810
    #[allow(clippy::needless_pass_by_value)]
811
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
812
        // Erase the element from the table first since drop might panic.
813
        self.erase_no_drop(&item);
814
        item.drop();
815
    }
816
817
    /// Removes an element from the table, returning it.
818
    ///
819
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
820
    #[cfg_attr(feature = "inline-more", inline)]
821
    #[allow(clippy::needless_pass_by_value)]
822
168k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
168k
        self.erase_no_drop(&item);
824
168k
        (
825
168k
            item.read(),
826
168k
            InsertSlot {
827
168k
                index: self.bucket_index(&item),
828
168k
            },
829
168k
        )
830
168k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::remove
Line
Count
Source
822
11.7k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
11.7k
        self.erase_no_drop(&item);
824
11.7k
        (
825
11.7k
            item.read(),
826
11.7k
            InsertSlot {
827
11.7k
                index: self.bucket_index(&item),
828
11.7k
            },
829
11.7k
        )
830
11.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::remove
Line
Count
Source
822
89.6k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
89.6k
        self.erase_no_drop(&item);
824
89.6k
        (
825
89.6k
            item.read(),
826
89.6k
            InsertSlot {
827
89.6k
                index: self.bucket_index(&item),
828
89.6k
            },
829
89.6k
        )
830
89.6k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::remove
Line
Count
Source
822
56.7k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
56.7k
        self.erase_no_drop(&item);
824
56.7k
        (
825
56.7k
            item.read(),
826
56.7k
            InsertSlot {
827
56.7k
                index: self.bucket_index(&item),
828
56.7k
            },
829
56.7k
        )
830
56.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::remove
Line
Count
Source
822
7.97k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
7.97k
        self.erase_no_drop(&item);
824
7.97k
        (
825
7.97k
            item.read(),
826
7.97k
            InsertSlot {
827
7.97k
                index: self.bucket_index(&item),
828
7.97k
            },
829
7.97k
        )
830
7.97k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::remove
Line
Count
Source
822
2.63k
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
823
2.63k
        self.erase_no_drop(&item);
824
2.63k
        (
825
2.63k
            item.read(),
826
2.63k
            InsertSlot {
827
2.63k
                index: self.bucket_index(&item),
828
2.63k
            },
829
2.63k
        )
830
2.63k
    }
831
832
    /// Finds and removes an element from the table, returning it.
833
    #[cfg_attr(feature = "inline-more", inline)]
834
422k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
422k
        // Avoid `Option::map` because it bloats LLVM IR.
836
422k
        match self.find(hash, eq) {
837
168k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
253k
            None => None,
839
        }
840
422k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
834
96.5k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
96.5k
        // Avoid `Option::map` because it bloats LLVM IR.
836
96.5k
        match self.find(hash, eq) {
837
6.90k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
89.6k
            None => None,
839
        }
840
96.5k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<[u8]>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
834
38.3k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
38.3k
        // Avoid `Option::map` because it bloats LLVM IR.
836
38.3k
        match self.find(hash, eq) {
837
4.81k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
33.4k
            None => None,
839
        }
840
38.3k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
834
633
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
633
        // Avoid `Option::map` because it bloats LLVM IR.
836
633
        match self.find(hash, eq) {
837
633
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
0
            None => None,
839
        }
840
633
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
834
162k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
162k
        // Avoid `Option::map` because it bloats LLVM IR.
836
162k
        match self.find(hash, eq) {
837
88.9k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
73.8k
            None => None,
839
        }
840
162k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>
Line
Count
Source
834
72.0k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
72.0k
        // Avoid `Option::map` because it bloats LLVM IR.
836
72.0k
        match self.find(hash, eq) {
837
56.7k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
15.2k
            None => None,
839
        }
840
72.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
Line
Count
Source
834
17.0k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
17.0k
        // Avoid `Option::map` because it bloats LLVM IR.
836
17.0k
        match self.find(hash, eq) {
837
7.97k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
9.07k
            None => None,
839
        }
840
17.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::remove_entry::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
834
34.8k
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
835
34.8k
        // Avoid `Option::map` because it bloats LLVM IR.
836
34.8k
        match self.find(hash, eq) {
837
2.63k
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
838
32.1k
            None => None,
839
        }
840
34.8k
    }
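
All of the `remove_entry` instantiations above are driven by the `lru` crate (keys of `Vec<u8>` or Suricata SMB header types, values wrapped in `NonNull<LruEntry<..>>`): popping or evicting a cache entry hashes the key and calls `remove_entry` with an equality closure. A minimal caller-side sketch, assuming a recent `lru` crate (0.8+, where `LruCache::new` takes a `NonZeroUsize` capacity); illustrative only, not the code that produced these counts:

use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // Mirrors the (KeyRef<Vec<u8>>, NonNull<LruEntry<Vec<u8>, Vec<u8>>>) instantiation above.
    let mut cache: LruCache<Vec<u8>, Vec<u8>> = LruCache::new(NonZeroUsize::new(128).unwrap());
    cache.put(b"session".to_vec(), b"state".to_vec());

    // `pop` hashes the key and removes the matching table entry; a miss ends up
    // in the `None => None` arm counted above.
    assert_eq!(cache.pop(&b"session".to_vec()), Some(b"state".to_vec()));
    assert_eq!(cache.pop(&b"missing".to_vec()), None);
}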
841
842
    /// Marks all table buckets as empty without dropping their contents.
843
    #[cfg_attr(feature = "inline-more", inline)]
844
    pub fn clear_no_drop(&mut self) {
845
        self.table.clear_no_drop();
846
    }
847
848
    /// Removes all elements from the table without freeing the backing memory.
849
    #[cfg_attr(feature = "inline-more", inline)]
850
    pub fn clear(&mut self) {
851
        if self.is_empty() {
852
            // Special case empty table to avoid surprising O(capacity) time.
853
            return;
854
        }
855
        // Ensure that the table is reset even if one of the drops panics
856
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
857
        unsafe {
858
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
859
            // even in case of panic during the dropping of the elements so
860
            // that there will be no double drop of the elements.
861
            self_.table.drop_elements::<T>();
862
        }
863
    }
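
The scope guard in `clear` is what keeps the table sound if an element's `Drop` panics: the item count is reset before unwinding continues, so the worst case is leaking the remaining elements rather than double-dropping them. A standalone sketch of the same pattern (illustrative only, not the hashbrown `scopeguard` internals):

struct Table {
    slots: Vec<Option<String>>,
    items: usize,
}

impl Table {
    /// Counterpart of `clear_no_drop`: forget the logical contents without running drops.
    fn clear_no_drop(&mut self) {
        self.items = 0;
    }

    fn clear(&mut self) {
        // The guard runs `clear_no_drop` when it is dropped, i.e. even if
        // dropping one of the elements below panics.
        struct ResetGuard<'a>(&'a mut Table);
        impl Drop for ResetGuard<'_> {
            fn drop(&mut self) {
                self.0.clear_no_drop();
            }
        }
        let guard = ResetGuard(self);
        for slot in guard.0.slots.iter_mut() {
            *slot = None; // drop each element in place
        }
    }
}

fn main() {
    let mut t = Table { slots: vec![Some("a".into()), None, Some("b".into())], items: 2 };
    t.clear();
    assert_eq!(t.items, 0);
}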
864
865
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
866
    #[cfg_attr(feature = "inline-more", inline)]
867
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
868
        // Calculate the minimal number of elements that we need to reserve
869
        // space for.
870
        let min_size = usize::max(self.table.items, min_size);
871
        if min_size == 0 {
872
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
873
            unsafe {
874
                // SAFETY:
875
                // 1. We call the function only once;
876
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
877
                //    and [`TableLayout`] that were used to allocate this table.
878
                // 3. If any elements' drop function panics, then there will only be a memory leak,
879
                //    because we have replaced the inner table with a new one.
880
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
881
            }
882
            return;
883
        }
884
885
        // Calculate the number of buckets that we need for this number of
886
        // elements. If the calculation overflows then the requested bucket
887
        // count must be larger than what we have right and nothing needs to be
888
        // done.
889
        let min_buckets = match capacity_to_buckets(min_size, Self::TABLE_LAYOUT) {
890
            Some(buckets) => buckets,
891
            None => return,
892
        };
893
894
        // If we have more buckets than we need, shrink the table.
895
        if min_buckets < self.buckets() {
896
            // Fast path if the table is empty
897
            if self.table.items == 0 {
898
                let new_inner =
899
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
900
                let mut old_inner = mem::replace(&mut self.table, new_inner);
901
                unsafe {
902
                    // SAFETY:
903
                    // 1. We call the function only once;
904
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
905
                    //    and [`TableLayout`] that were used to allocate this table.
906
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
907
                    //    because we have replaced the inner table with a new one.
908
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
909
                }
910
            } else {
911
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
912
                unsafe {
913
                    // SAFETY:
914
                    // 1. We know for sure that `min_size >= self.table.items`.
915
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
916
                    //    we will never expose RawTable::new_uninitialized in a public API.
917
                    if self
918
                        .resize(min_size, hasher, Fallibility::Infallible)
919
                        .is_err()
920
                    {
921
                        // SAFETY: The result of calling the `resize` function cannot be an error
922
                        // because `fallibility == Fallibility::Infallible`.
923
                        hint::unreachable_unchecked()
924
                    }
925
                }
926
            }
927
        }
928
    }
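
At the map level this is reached through `shrink_to`/`shrink_to_fit`, which either drop the allocation entirely (the `min_size == 0` branch) or resize down to the smallest bucket count that still holds the live items. A hedged sketch using `hashbrown::HashMap`'s public API (any hashbrown version with `shrink_to` should behave the same way):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u64, Vec<u8>> = HashMap::with_capacity(1024);
    map.insert(1, vec![0u8; 16]);

    // Keep room for at least 8 entries; the raw `shrink_to` above computes the
    // matching bucket count and only reallocates if it is smaller than the
    // current table.
    map.shrink_to(8);
    assert!(map.capacity() >= 8);

    // Shrinking to fit the single live item (min_size == self.len()).
    map.shrink_to_fit();
    assert!(map.capacity() >= map.len());
}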
929
930
    /// Ensures that at least `additional` items can be inserted into the table
931
    /// without reallocation.
932
    #[cfg_attr(feature = "inline-more", inline)]
933
337k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
337k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
337k
        }
948
337k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
933
52.3k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
52.3k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
52.3k
        }
948
52.3k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
933
182k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
182k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
182k
        }
948
182k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
933
70.9k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
70.9k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
70.9k
        }
948
70.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
933
20.4k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
20.4k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
20.4k
        }
948
20.4k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::reserve::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
933
11.2k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
934
11.2k
        if unlikely(additional > self.table.growth_left) {
935
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
936
            unsafe {
937
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
938
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
939
0
                if self
940
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
941
0
                    .is_err()
942
                {
943
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
944
0
                    hint::unreachable_unchecked()
945
0
                }
946
            }
947
11.2k
        }
948
11.2k
    }
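
`reserve` only falls into the out-of-line `reserve_rehash` slow path when the request exceeds `growth_left`; in the traces above that branch has a zero count, most likely because the lru caches are bounded and `reserve(1)` is always satisfied from the existing allocation. A sketch of the amortization this buys, using `hashbrown::HashMap` (the same contract as `std`'s `reserve`):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::new();

    // One up-front reservation; the following inserts stay on the fast path
    // because `additional <= growth_left` holds for each of them.
    map.reserve(1_000);
    let cap = map.capacity();
    for i in 0..1_000 {
        map.insert(i, i);
    }
    assert_eq!(map.capacity(), cap); // no rehash happened
}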
949
950
    /// Tries to ensure that at least `additional` items can be inserted into
951
    /// the table without reallocation.
952
    #[cfg_attr(feature = "inline-more", inline)]
953
    pub fn try_reserve(
954
        &mut self,
955
        additional: usize,
956
        hasher: impl Fn(&T) -> u64,
957
    ) -> Result<(), TryReserveError> {
958
        if additional > self.table.growth_left {
959
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
960
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
961
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
962
        } else {
963
            Ok(())
964
        }
965
    }
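
`try_reserve` is the fallible twin: the same `growth_left` check, but capacity overflow or allocation failure comes back as a `TryReserveError` instead of aborting. A small sketch, again via the public `HashMap` wrapper:

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u64, [u8; 64]> = HashMap::new();

    // An absurd reservation: this reports CapacityOverflow (or AllocError)
    // rather than aborting the process.
    match map.try_reserve(usize::MAX / 2) {
        Ok(()) => println!("reserved"),
        Err(err) => eprintln!("reservation failed: {err:?}"),
    }

    // Reasonable reservations still succeed.
    map.try_reserve(16).expect("small reserve should succeed");
}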
966
967
    /// Out-of-line slow path for `reserve` and `try_reserve`.
968
    ///
969
    /// # Safety
970
    ///
971
    /// The [`RawTableInner`] must have properly initialized control bytes,
972
    /// otherwise calling this function results in [`undefined behavior`]
973
    ///
974
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
975
    #[cold]
976
    #[inline(never)]
977
0
    unsafe fn reserve_rehash(
978
0
        &mut self,
979
0
        additional: usize,
980
0
        hasher: impl Fn(&T) -> u64,
981
0
        fallibility: Fallibility,
982
0
    ) -> Result<(), TryReserveError> {
983
0
        unsafe {
984
0
            // SAFETY:
985
0
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
986
0
            //    [`TableLayout`] that were used to allocate this table.
987
0
            // 2. The `drop` function is the actual drop function of the elements stored in
988
0
            //    the table.
989
0
            // 3. The caller ensures that the control bytes of the `RawTableInner`
990
0
            //    are already initialized.
991
0
            self.table.reserve_rehash_inner(
992
0
                &self.alloc,
993
0
                additional,
994
0
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
995
0
                fallibility,
996
0
                Self::TABLE_LAYOUT,
997
0
                if T::NEEDS_DROP {
998
0
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
999
                } else {
1000
0
                    None
1001
                },
1002
            )
1003
        }
1004
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>, foldhash::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>, foldhash::fast::RandomState>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::reserve_rehash::<hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
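
The `#[cold]`/`#[inline(never)]` pair is the point of `reserve_rehash`: the branchy rehash logic stays out of the inlined fast path, so `reserve` compiles down to a compare-and-branch in the common case. The same hot/cold split in miniature (illustrative, not hashbrown's code):

pub struct Counter {
    left: usize,
}

impl Counter {
    #[inline]
    pub fn bump(&mut self) {
        if self.left == 0 {
            self.refill(); // rare: kept out of line
        }
        self.left -= 1;
    }

    #[cold]
    #[inline(never)]
    fn refill(&mut self) {
        self.left = 1024;
    }
}

fn main() {
    let mut c = Counter { left: 0 };
    c.bump();
    assert_eq!(c.left, 1023);
}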
1005
1006
    /// Allocates a new table of a different size and moves the contents of the
1007
    /// current table into it.
1008
    ///
1009
    /// # Safety
1010
    ///
1011
    /// The [`RawTableInner`] must have properly initialized control bytes,
1012
    /// otherwise calling this function results in [`undefined behavior`]
1013
    ///
1014
    /// The caller of this function must ensure that `capacity >= self.table.items`
1015
    /// otherwise:
1016
    ///
1017
    /// * If `self.table.items != 0`, calling this function with `capacity`
1018
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
1019
    ///
1020
    /// * If `self.table.items > capacity_to_buckets(capacity, Self::TABLE_LAYOUT)`
1021
    ///   calling this function will never return (it will loop infinitely).
1022
    ///
1023
    /// See [`RawTableInner::find_insert_slot`] for more information.
1024
    ///
1025
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1026
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1027
    unsafe fn resize(
1028
        &mut self,
1029
        capacity: usize,
1030
        hasher: impl Fn(&T) -> u64,
1031
        fallibility: Fallibility,
1032
    ) -> Result<(), TryReserveError> {
1033
        // SAFETY:
1034
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1035
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1036
        //    [`TableLayout`] that were used to allocate this table.
1037
        // 3. The caller ensures that the control bytes of the `RawTableInner`
1038
        //    are already initialized.
1039
        self.table.resize_inner(
1040
            &self.alloc,
1041
            capacity,
1042
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1043
            fallibility,
1044
            Self::TABLE_LAYOUT,
1045
        )
1046
    }
1047
1048
    /// Inserts a new element into the table, and returns its raw bucket.
1049
    ///
1050
    /// This does not check if the given element already exists in the table.
1051
    #[cfg_attr(feature = "inline-more", inline)]
1052
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1053
        unsafe {
1054
            // SAFETY:
1055
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1056
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1057
            //
1058
            // 2. We reserve additional space (if necessary) right after calling this function.
1059
            let mut slot = self.table.find_insert_slot(hash);
1060
1061
            // We can avoid growing the table once we have reached our load factor if we are replacing
1062
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1063
            //
1064
            // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
1065
            // in the range `0..=self.buckets()`.
1066
            let old_ctrl = *self.table.ctrl(slot.index);
1067
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
1068
                self.reserve(1, hasher);
1069
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1070
                // initialized and that there is extra space in the table.
1071
                slot = self.table.find_insert_slot(hash);
1072
            }
1073
1074
            self.insert_in_slot(hash, slot, value)
1075
        }
1076
    }
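
`insert` trusts the caller on uniqueness: it only looks for a free slot, growing first when the slot it found is a true EMPTY and `growth_left` is exhausted, but not when it merely reuses a tombstone. The public `HashTable::insert_unique` exposes the same contract; a hedged sketch assuming hashbrown 0.15's `HashTable` API and the standard-library `RandomState` hasher:

use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

use hashbrown::HashTable;

fn main() {
    let state = RandomState::new();
    let mut table: HashTable<(String, u32)> = HashTable::new();

    let key = "tree_id".to_string();
    let hash = state.hash_one(&key);

    // No duplicate check is performed; inserting the same key twice would
    // simply create two entries.
    table.insert_unique(hash, (key, 7), |(k, _)| state.hash_one(k));
    assert_eq!(table.len(), 1);
}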
1077
1078
    /// Inserts a new element into the table, and returns a mutable reference to it.
1079
    ///
1080
    /// This does not check if the given element already exists in the table.
1081
    #[cfg_attr(feature = "inline-more", inline)]
1082
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1083
        unsafe { self.insert(hash, value, hasher).as_mut() }
1084
    }
1085
1086
    /// Inserts a new element into the table, without growing the table.
1087
    ///
1088
    /// There must be enough space in the table to insert the new element.
1089
    ///
1090
    /// This does not check if the given element already exists in the table.
1091
    #[cfg_attr(feature = "inline-more", inline)]
1092
    #[cfg(feature = "rustc-internal-api")]
1093
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1094
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1095
        let bucket = self.table.bucket(index);
1096
1097
        // If we are replacing a DELETED entry then we don't need to update
1098
        // the load counter.
1099
        self.table.growth_left -= old_ctrl.special_is_empty() as usize;
1100
1101
        bucket.write(value);
1102
        self.table.items += 1;
1103
        bucket
1104
    }
1105
1106
    /// Temporary removes a bucket, applying the given function to the removed
1107
    /// element and optionally put back the returned value in the same bucket.
1108
    ///
1109
    /// Returns `true` if the bucket still contains an element
1110
    ///
1111
    /// This does not check if the given bucket is actually occupied.
1112
    #[cfg_attr(feature = "inline-more", inline)]
1113
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1114
    where
1115
        F: FnOnce(T) -> Option<T>,
1116
    {
1117
        let index = self.bucket_index(&bucket);
1118
        let old_ctrl = *self.table.ctrl(index);
1119
        debug_assert!(self.is_bucket_full(index));
1120
        let old_growth_left = self.table.growth_left;
1121
        let item = self.remove(bucket).0;
1122
        if let Some(new_item) = f(item) {
1123
            self.table.growth_left = old_growth_left;
1124
            self.table.set_ctrl(index, old_ctrl);
1125
            self.table.items += 1;
1126
            self.bucket(index).write(new_item);
1127
            true
1128
        } else {
1129
            false
1130
        }
1131
    }
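
`replace_bucket_with` is a take-then-maybe-put-back primitive: the element is moved out, the closure decides its fate, and the control-byte/`growth_left` bookkeeping is restored only if a replacement is written back. The same shape, reduced to a single `Option` slot (illustrative only, not the hashbrown internals):

// Take the element out of the slot, let the closure decide whether to put one back.
// Returns true if the slot still contains an element afterwards.
fn replace_slot_with<T>(slot: &mut Option<T>, f: impl FnOnce(T) -> Option<T>) -> bool {
    match slot.take() {
        Some(item) => {
            *slot = f(item);
            slot.is_some()
        }
        None => false,
    }
}

fn main() {
    let mut slot = Some(3);
    assert!(replace_slot_with(&mut slot, |n| Some(n + 1)));
    assert_eq!(slot, Some(4));
    assert!(!replace_slot_with(&mut slot, |_| None));
    assert_eq!(slot, None);
}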
1132
1133
    /// Searches for an element in the table. If the element is not found,
1134
    /// returns `Err` with the position of a slot where an element with the
1135
    /// same hash could be inserted.
1136
    ///
1137
    /// This function may resize the table if additional space is required for
1138
    /// inserting an element.
1139
    #[inline]
1140
337k
    pub fn find_or_find_insert_slot(
1141
337k
        &mut self,
1142
337k
        hash: u64,
1143
337k
        mut eq: impl FnMut(&T) -> bool,
1144
337k
        hasher: impl Fn(&T) -> u64,
1145
337k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
337k
        self.reserve(1, hasher);
1147
337k
1148
337k
        unsafe {
1149
337k
            // SAFETY:
1150
337k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
337k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
337k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
337k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
337k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
337k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
337k
            match self
1157
337k
                .table
1158
337k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Line
Count
Source
1158
76
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Line
Count
Source
1158
818
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Line
Count
Source
1158
271
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Line
Count
Source
1158
39
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>::{closure#0}
Line
Count
Source
1158
87
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
337k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
337k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
1140
52.3k
    pub fn find_or_find_insert_slot(
1141
52.3k
        &mut self,
1142
52.3k
        hash: u64,
1143
52.3k
        mut eq: impl FnMut(&T) -> bool,
1144
52.3k
        hasher: impl Fn(&T) -> u64,
1145
52.3k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
52.3k
        self.reserve(1, hasher);
1147
52.3k
1148
52.3k
        unsafe {
1149
52.3k
            // SAFETY:
1150
52.3k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
52.3k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
52.3k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
52.3k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
52.3k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
52.3k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
52.3k
            match self
1157
52.3k
                .table
1158
52.3k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
52.3k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
52.3k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
1140
182k
    pub fn find_or_find_insert_slot(
1141
182k
        &mut self,
1142
182k
        hash: u64,
1143
182k
        mut eq: impl FnMut(&T) -> bool,
1144
182k
        hasher: impl Fn(&T) -> u64,
1145
182k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
182k
        self.reserve(1, hasher);
1147
182k
1148
182k
        unsafe {
1149
182k
            // SAFETY:
1150
182k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
182k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
182k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
182k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
182k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
182k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
182k
            match self
1157
182k
                .table
1158
182k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
182k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
182k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
1140
70.9k
    pub fn find_or_find_insert_slot(
1141
70.9k
        &mut self,
1142
70.9k
        hash: u64,
1143
70.9k
        mut eq: impl FnMut(&T) -> bool,
1144
70.9k
        hasher: impl Fn(&T) -> u64,
1145
70.9k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
70.9k
        self.reserve(1, hasher);
1147
70.9k
1148
70.9k
        unsafe {
1149
70.9k
            // SAFETY:
1150
70.9k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
70.9k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
70.9k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
70.9k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
70.9k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
70.9k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
70.9k
            match self
1157
70.9k
                .table
1158
70.9k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
70.9k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
70.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
1140
20.4k
    pub fn find_or_find_insert_slot(
1141
20.4k
        &mut self,
1142
20.4k
        hash: u64,
1143
20.4k
        mut eq: impl FnMut(&T) -> bool,
1144
20.4k
        hasher: impl Fn(&T) -> u64,
1145
20.4k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
20.4k
        self.reserve(1, hasher);
1147
20.4k
1148
20.4k
        unsafe {
1149
20.4k
            // SAFETY:
1150
20.4k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
20.4k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
20.4k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
20.4k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
20.4k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
20.4k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
20.4k
            match self
1157
20.4k
                .table
1158
20.4k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
20.4k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
20.4k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}, hashbrown::map::make_hasher<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>, foldhash::fast::RandomState>::{closure#0}>
Line
Count
Source
1140
11.2k
    pub fn find_or_find_insert_slot(
1141
11.2k
        &mut self,
1142
11.2k
        hash: u64,
1143
11.2k
        mut eq: impl FnMut(&T) -> bool,
1144
11.2k
        hasher: impl Fn(&T) -> u64,
1145
11.2k
    ) -> Result<Bucket<T>, InsertSlot> {
1146
11.2k
        self.reserve(1, hasher);
1147
11.2k
1148
11.2k
        unsafe {
1149
11.2k
            // SAFETY:
1150
11.2k
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
11.2k
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
11.2k
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
11.2k
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1154
11.2k
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
11.2k
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
11.2k
            match self
1157
11.2k
                .table
1158
11.2k
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
11.2k
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
11.2k
    }
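
The `equivalent_key`/`make_hasher` closures in the instantiation names show where these calls come from: map-level insert/entry operations (`HashMap::insert` and `HashMap::entry` both appear to route through `find_or_find_insert_slot`, with a hit updating the found bucket and a miss filling the returned `InsertSlot` via `insert_in_slot`). A sketch of that caller pattern with `hashbrown::HashMap`:

use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<Vec<u8>, u32> = HashMap::new();

    for key in [b"smb1".to_vec(), b"smb2".to_vec(), b"smb1".to_vec()] {
        // One hash and one probe per iteration: a hit is the `Ok(bucket)` arm
        // (zero count in the traces above), a miss fills the slot in place.
        *counts.entry(key).or_insert(0) += 1;
    }

    assert_eq!(counts[&b"smb1".to_vec()], 2);
    assert_eq!(counts[&b"smb2".to_vec()], 1);
}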
1166
1167
    /// Inserts a new element into the table in the given slot, and returns its
1168
    /// raw bucket.
1169
    ///
1170
    /// # Safety
1171
    ///
1172
    /// `slot` must point to a slot previously returned by
1173
    /// `find_or_find_insert_slot`, and no mutation of the table must have
1174
    /// occurred since that call.
1175
    #[inline]
1176
337k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
337k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
337k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
337k
1180
337k
        let bucket = self.bucket(slot.index);
1181
337k
        bucket.write(value);
1182
337k
        bucket
1183
337k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::insert_in_slot
Line
Count
Source
1176
52.3k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
52.3k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
52.3k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
52.3k
1180
52.3k
        let bucket = self.bucket(slot.index);
1181
52.3k
        bucket.write(value);
1182
52.3k
        bucket
1183
52.3k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::insert_in_slot
Line
Count
Source
1176
182k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
182k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
182k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
182k
1180
182k
        let bucket = self.bucket(slot.index);
1181
182k
        bucket.write(value);
1182
182k
        bucket
1183
182k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::insert_in_slot
Line
Count
Source
1176
70.9k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
70.9k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
70.9k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
70.9k
1180
70.9k
        let bucket = self.bucket(slot.index);
1181
70.9k
        bucket.write(value);
1182
70.9k
        bucket
1183
70.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::insert_in_slot
Line
Count
Source
1176
20.4k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
20.4k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
20.4k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
20.4k
1180
20.4k
        let bucket = self.bucket(slot.index);
1181
20.4k
        bucket.write(value);
1182
20.4k
        bucket
1183
20.4k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::insert_in_slot
Line
Count
Source
1176
11.2k
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
11.2k
        let old_ctrl = *self.table.ctrl(slot.index);
1178
11.2k
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
11.2k
1180
11.2k
        let bucket = self.bucket(slot.index);
1181
11.2k
        bucket.write(value);
1182
11.2k
        bucket
1183
11.2k
    }
1184
1185
    /// Searches for an element in the table.
1186
    #[inline]
1187
1.24M
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
1.24M
        unsafe {
1189
1.24M
            // SAFETY:
1190
1.24M
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
1.24M
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
1.24M
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
1.24M
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
1.24M
            //    is safe.
1195
1.24M
            let result = self
1196
1.24M
                .table
1197
1.24M
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
12.1k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
52.3k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<[u8]>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
92.2k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
99.9k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
89.8k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
36.4k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
56.8k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
8.00k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
81.4k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
32.2k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>::{closure#0}
Line
Count
Source
1197
2.69k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
1.24M
1199
1.24M
            // Avoid `Option::map` because it bloats LLVM IR.
1200
1.24M
            match result {
1201
                // SAFETY: See explanation above.
1202
561k
                Some(index) => Some(self.bucket(index)),
1203
683k
                None => None,
1204
            }
1205
        }
1206
1.24M
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
52.7k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
52.7k
        unsafe {
1189
52.7k
            // SAFETY:
1190
52.7k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
52.7k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
52.7k
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
52.7k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
52.7k
            //    is safe.
1195
52.7k
            let result = self
1196
52.7k
                .table
1197
52.7k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
52.7k
1199
52.7k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
52.7k
            match result {
1201
                // SAFETY: See explanation above.
1202
12.0k
                Some(index) => Some(self.bucket(index)),
1203
40.6k
                None => None,
1204
            }
1205
        }
1206
52.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
194k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
194k
        unsafe {
1189
194k
            // SAFETY:
1190
194k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
194k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
194k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
194k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
194k
            //    is safe.
1195
194k
            let result = self
1196
194k
                .table
1197
194k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
194k
1199
194k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
194k
            match result {
1201
                // SAFETY: See explanation above.
1202
52.2k
                Some(index) => Some(self.bucket(index)),
1203
142k
                None => None,
1204
            }
1205
        }
1206
194k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<[u8]>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
209k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
209k
        unsafe {
1189
209k
            // SAFETY:
1190
209k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
209k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
209k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
209k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
209k
            //    is safe.
1195
209k
            let result = self
1196
209k
                .table
1197
209k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
209k
1199
209k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
209k
            match result {
1201
                // SAFETY: See explanation above.
1202
91.8k
                Some(index) => Some(self.bucket(index)),
1203
117k
                None => None,
1204
            }
1205
        }
1206
209k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
249k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
249k
        unsafe {
1189
249k
            // SAFETY:
1190
249k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
249k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
249k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
249k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
249k
            //    is safe.
1195
249k
            let result = self
1196
249k
                .table
1197
249k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
249k
1199
249k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
249k
            match result {
1201
                // SAFETY: See explanation above.
1202
99.1k
                Some(index) => Some(self.bucket(index)),
1203
150k
                None => None,
1204
            }
1205
        }
1206
249k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
165k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
165k
        unsafe {
1189
165k
            // SAFETY:
1190
165k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
165k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
165k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
165k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
165k
            //    is safe.
1195
165k
            let result = self
1196
165k
                .table
1197
165k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
165k
1199
165k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
165k
            match result {
1201
                // SAFETY: See explanation above.
1202
89.4k
                Some(index) => Some(self.bucket(index)),
1203
76.5k
                None => None,
1204
            }
1205
        }
1206
165k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>
Line
Count
Source
1187
78.8k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
78.8k
        unsafe {
1189
78.8k
            // SAFETY:
1190
78.8k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
78.8k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
78.8k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
78.8k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
78.8k
            //    is safe.
1195
78.8k
            let result = self
1196
78.8k
                .table
1197
78.8k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
78.8k
1199
78.8k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
78.8k
            match result {
1201
                // SAFETY: See explanation above.
1202
36.1k
                Some(index) => Some(self.bucket(index)),
1203
42.6k
                None => None,
1204
            }
1205
        }
1206
78.8k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>
Line
Count
Source
1187
72.0k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
72.0k
        unsafe {
1189
72.0k
            // SAFETY:
1190
72.0k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
72.0k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
72.0k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
72.0k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
72.0k
            //    is safe.
1195
72.0k
            let result = self
1196
72.0k
                .table
1197
72.0k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
72.0k
1199
72.0k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
72.0k
            match result {
1201
                // SAFETY: See explanation above.
1202
56.7k
                Some(index) => Some(self.bucket(index)),
1203
15.2k
                None => None,
1204
            }
1205
        }
1206
72.0k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
Line
Count
Source
1187
18.9k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
18.9k
        unsafe {
1189
18.9k
            // SAFETY:
1190
18.9k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
18.9k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
18.9k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
18.9k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
18.9k
            //    is safe.
1195
18.9k
            let result = self
1196
18.9k
                .table
1197
18.9k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
18.9k
1199
18.9k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
18.9k
            match result {
1201
                // SAFETY: See explanation above.
1202
7.96k
                Some(index) => Some(self.bucket(index)),
1203
11.0k
                None => None,
1204
            }
1205
        }
1206
18.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
Line
Count
Source
1187
127k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
127k
        unsafe {
1189
127k
            // SAFETY:
1190
127k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
127k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
127k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
127k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
127k
            //    is safe.
1195
127k
            let result = self
1196
127k
                .table
1197
127k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
127k
1199
127k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
127k
            match result {
1201
                // SAFETY: See explanation above.
1202
81.3k
                Some(index) => Some(self.bucket(index)),
1203
45.9k
                None => None,
1204
            }
1205
        }
1206
127k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
40.5k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
40.5k
        unsafe {
1189
40.5k
            // SAFETY:
1190
40.5k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
40.5k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
40.5k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
40.5k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
40.5k
            //    is safe.
1195
40.5k
            let result = self
1196
40.5k
                .table
1197
40.5k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
40.5k
1199
40.5k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
40.5k
            match result {
1201
                // SAFETY: See explanation above.
1202
32.1k
                Some(index) => Some(self.bucket(index)),
1203
8.41k
                None => None,
1204
            }
1205
        }
1206
40.5k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::find::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1187
34.8k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
34.8k
        unsafe {
1189
34.8k
            // SAFETY:
1190
34.8k
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
34.8k
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
34.8k
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1193
34.8k
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
34.8k
            //    is safe.
1195
34.8k
            let result = self
1196
34.8k
                .table
1197
34.8k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1198
34.8k
1199
34.8k
            // Avoid `Option::map` because it bloats LLVM IR.
1200
34.8k
            match result {
1201
                // SAFETY: See explanation above.
1202
2.63k
                Some(index) => Some(self.bucket(index)),
1203
32.1k
                None => None,
1204
            }
1205
        }
1206
34.8k
    }
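
For orientation, a minimal sketch of the hash-plus-equality-closure lookup pattern that `find` implements, written against hashbrown's public `HashTable` API rather than the internal `RawTable` instrumented above; the `RandomState` hasher and the tuple element type are illustrative assumptions.

use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let hasher = RandomState::new();
    let mut table: HashTable<(u32, &'static str)> = HashTable::new();

    let key = 42u32;
    let hash = hasher.hash_one(key);
    // Insertion takes the precomputed hash plus a hasher used when the table grows.
    table.insert_unique(hash, (key, "answer"), |&(k, _)| hasher.hash_one(k));

    // `find` takes the full 64-bit hash and an equality closure over the stored element.
    let found = table.find(hash, |&(k, _)| k == key);
    assert_eq!(found.map(|&(_, v)| v), Some("answer"));
}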
1207
1208
    /// Gets a reference to an element in the table.
1209
    #[inline]
1210
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1211
        // Avoid `Option::map` because it bloats LLVM IR.
1212
        match self.find(hash, eq) {
1213
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1214
            None => None,
1215
        }
1216
    }
1217
1218
    /// Gets a mutable reference to an element in the table.
1219
    #[inline]
1220
822k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
822k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
822k
        match self.find(hash, eq) {
1223
393k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
429k
            None => None,
1225
        }
1226
822k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
52.7k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
52.7k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
52.7k
        match self.find(hash, eq) {
1223
12.0k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
40.6k
            None => None,
1225
        }
1226
52.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyWrapper<alloc::vec::Vec<u8>>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
98.1k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
98.1k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
98.1k
        match self.find(hash, eq) {
1223
45.3k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
52.8k
            None => None,
1225
        }
1226
98.1k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyWrapper<[u8]>, lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
170k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
170k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
170k
        match self.find(hash, eq) {
1223
87.0k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
83.7k
            None => None,
1225
        }
1226
170k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
249k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
249k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
249k
        match self.find(hash, eq) {
1223
98.5k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
150k
            None => None,
1225
        }
1226
249k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
3.19k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
3.19k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
3.19k
        match self.find(hash, eq) {
1223
504
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
2.68k
            None => None,
1225
        }
1226
3.19k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>>::{closure#0}>
Line
Count
Source
1220
78.8k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
78.8k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
78.8k
        match self.find(hash, eq) {
1223
36.1k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
42.6k
            None => None,
1225
        }
1226
78.8k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
Line
Count
Source
1220
18.9k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
18.9k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
18.9k
        match self.find(hash, eq) {
1223
7.96k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
11.0k
            None => None,
1225
        }
1226
18.9k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyWrapper<suricata::smb::smb::SMBCommonHdr>, lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>>::{closure#0}>
Line
Count
Source
1220
110k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
110k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
110k
        match self.find(hash, eq) {
1223
73.3k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
36.8k
            None => None,
1225
        }
1226
110k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::get_mut::<hashbrown::map::equivalent_key<lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>>::{closure#0}>
Line
Count
Source
1220
40.5k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
40.5k
        // Avoid `Option::map` because it bloats LLVM IR.
1222
40.5k
        match self.find(hash, eq) {
1223
32.1k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
8.41k
            None => None,
1225
        }
1226
40.5k
    }
1227
1228
    /// Attempts to get mutable references to `N` entries in the table at once.
1229
    ///
1230
    /// Returns an array of length `N` with the results of each query.
1231
    ///
1232
    /// At most one mutable reference will be returned to any entry. `None` will be returned if any
1233
    /// of the hashes are duplicates. `None` will be returned if the hash is not found.
1234
    ///
1235
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1236
    /// the `i`th key to be looked up.
1237
    pub fn get_many_mut<const N: usize>(
1238
        &mut self,
1239
        hashes: [u64; N],
1240
        eq: impl FnMut(usize, &T) -> bool,
1241
    ) -> [Option<&'_ mut T>; N] {
1242
        unsafe {
1243
            let ptrs = self.get_many_mut_pointers(hashes, eq);
1244
1245
            for (i, cur) in ptrs.iter().enumerate() {
1246
                if cur.is_some() && ptrs[..i].contains(cur) {
1247
                    panic!("duplicate keys found");
1248
                }
1249
            }
1250
            // All buckets are distinct from all previous buckets so we're clear to return the result
1251
            // of the lookup.
1252
1253
            ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1254
        }
1255
    }
1256
1257
    pub unsafe fn get_many_unchecked_mut<const N: usize>(
1258
        &mut self,
1259
        hashes: [u64; N],
1260
        eq: impl FnMut(usize, &T) -> bool,
1261
    ) -> [Option<&'_ mut T>; N] {
1262
        let ptrs = self.get_many_mut_pointers(hashes, eq);
1263
        ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1264
    }
1265
1266
    unsafe fn get_many_mut_pointers<const N: usize>(
1267
        &mut self,
1268
        hashes: [u64; N],
1269
        mut eq: impl FnMut(usize, &T) -> bool,
1270
    ) -> [Option<NonNull<T>>; N] {
1271
        array::from_fn(|i| {
1272
            self.find(hashes[i], |k| eq(i, k))
1273
                .map(|cur| cur.as_non_null())
1274
        })
1275
    }
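
To make the duplicate check in `get_many_mut` above concrete, here is a small self-contained sketch of the same pointer-disjointness test; the helper name `assert_disjoint` is made up for illustration.

use core::ptr::NonNull;

// Each resolved pointer is compared against all earlier ones before any
// mutable references are handed out; overlapping lookups panic.
fn assert_disjoint<T, const N: usize>(ptrs: &[Option<NonNull<T>>; N]) {
    for (i, cur) in ptrs.iter().enumerate() {
        if cur.is_some() && ptrs[..i].contains(cur) {
            panic!("duplicate keys found");
        }
    }
}

fn main() {
    let mut a = 1u32;
    let mut b = 2u32;
    // Distinct entries resolve to distinct pointers, so this does not panic.
    let ptrs = [Some(NonNull::from(&mut a)), Some(NonNull::from(&mut b)), None];
    assert_disjoint(&ptrs);
}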
1276
1277
    /// Returns the number of elements the map can hold without reallocating.
1278
    ///
1279
    /// This number is a lower bound; the table might be able to hold
1280
    /// more, but is guaranteed to be able to hold at least this many.
1281
    #[inline]
1282
    pub fn capacity(&self) -> usize {
1283
        self.table.items + self.table.growth_left
1284
    }
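
As a quick illustration of the "lower bound" wording above, using hashbrown's public `HashMap` as an assumed but standard entry point:

use hashbrown::HashMap;

fn main() {
    // `capacity()` reports at least the requested room; the table may round
    // up to its internal bucket sizing, but never down.
    let map: HashMap<u32, u32> = HashMap::with_capacity(10);
    assert!(map.capacity() >= 10);
    assert_eq!(map.len(), 0);
}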
1285
1286
    /// Returns the number of elements in the table.
1287
    #[inline]
1288
1.41M
    pub fn len(&self) -> usize {
1289
1.41M
        self.table.items
1290
1.41M
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::len
Line
Count
Source
1288
509k
    pub fn len(&self) -> usize {
1289
509k
        self.table.items
1290
509k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::len
Line
Count
Source
1288
466k
    pub fn len(&self) -> usize {
1289
466k
        self.table.items
1290
466k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::len
Line
Count
Source
1288
178k
    pub fn len(&self) -> usize {
1289
178k
        self.table.items
1290
178k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::len
Line
Count
Source
1288
206k
    pub fn len(&self) -> usize {
1289
206k
        self.table.items
1290
206k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::len
Line
Count
Source
1288
54.7k
    pub fn len(&self) -> usize {
1289
54.7k
        self.table.items
1290
54.7k
    }
1291
1292
    /// Returns `true` if the table contains no elements.
1293
    #[inline]
1294
1.07M
    pub fn is_empty(&self) -> bool {
1295
1.07M
        self.len() == 0
1296
1.07M
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::is_empty
Line
Count
Source
1294
457k
    pub fn is_empty(&self) -> bool {
1295
457k
        self.len() == 0
1296
457k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::is_empty
Line
Count
Source
1294
284k
    pub fn is_empty(&self) -> bool {
1295
284k
        self.len() == 0
1296
284k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::is_empty
Line
Count
Source
1294
107k
    pub fn is_empty(&self) -> bool {
1295
107k
        self.len() == 0
1296
107k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::is_empty
Line
Count
Source
1294
186k
    pub fn is_empty(&self) -> bool {
1295
186k
        self.len() == 0
1296
186k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::is_empty
Line
Count
Source
1294
43.4k
    pub fn is_empty(&self) -> bool {
1295
43.4k
        self.len() == 0
1296
43.4k
    }
1297
1298
    /// Returns the number of buckets in the table.
1299
    #[inline]
1300
    pub fn buckets(&self) -> usize {
1301
        self.table.bucket_mask + 1
1302
    }
1303
1304
    /// Checks whether the bucket at `index` is full.
1305
    ///
1306
    /// # Safety
1307
    ///
1308
    /// The caller must ensure `index` is less than the number of buckets.
1309
    #[inline]
1310
    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1311
        self.table.is_bucket_full(index)
1312
    }
1313
1314
    /// Returns an iterator over every element in the table. It is up to
1315
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1316
    /// Because we cannot make the `next` method unsafe on the `RawIter`
1317
    /// struct, we have to make the `iter` method unsafe.
1318
    #[inline]
1319
193k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
193k
        // SAFETY:
1321
193k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
193k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
193k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
193k
        self.table.iter()
1325
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::iter
Line
Count
Source
1319
38.7k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
38.7k
        // SAFETY:
1321
38.7k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
38.7k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
38.7k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
38.7k
        self.table.iter()
1325
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::iter
Line
Count
Source
1319
38.7k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
38.7k
        // SAFETY:
1321
38.7k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
38.7k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
38.7k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
38.7k
        self.table.iter()
1325
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::iter
Line
Count
Source
1319
38.7k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
38.7k
        // SAFETY:
1321
38.7k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
38.7k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
38.7k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
38.7k
        self.table.iter()
1325
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::iter
Line
Count
Source
1319
38.7k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
38.7k
        // SAFETY:
1321
38.7k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
38.7k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
38.7k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
38.7k
        self.table.iter()
1325
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::iter
Line
Count
Source
1319
38.7k
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
38.7k
        // SAFETY:
1321
38.7k
        // 1. The caller must uphold the safety contract for `iter` method.
1322
38.7k
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
38.7k
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
38.7k
        self.table.iter()
1325
38.7k
    }
1326
1327
    /// Returns an iterator over occupied buckets that could match a given hash.
1328
    ///
1329
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1330
    /// return items that have a hash value different than the one provided. You
1331
    /// should always validate the returned values before using them.
1332
    ///
1333
    /// It is up to the caller to ensure that the `RawTable` outlives the
1334
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1335
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1336
    #[cfg_attr(feature = "inline-more", inline)]
1337
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1338
        RawIterHash::new(self, hash)
1339
    }
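
A simplified sketch of why `iter_hash` can yield false positives: only the top 7 bits of the hash survive in the control byte. The `h2` helper below is an illustrative stand-in for the crate's internal `Tag::full`, assuming a 64-bit build.

fn h2(hash: u64) -> u8 {
    // Keep only the top 7 bits of the 64-bit hash, as the control bytes do.
    (hash >> 57) as u8 & 0x7f
}

fn main() {
    let a = 0xDEAD_BEEF_0000_0001u64;
    let b = 0xDEAD_BEEF_0000_0002u64;
    // Different full hashes can collapse to the same 7-bit tag, which is why
    // callers must re-validate entries returned by `iter_hash`.
    assert_eq!(h2(a), h2(b));
    assert_ne!(a, b);
}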
1340
1341
    /// Returns an iterator which removes all elements from the table without
1342
    /// freeing the memory.
1343
    #[cfg_attr(feature = "inline-more", inline)]
1344
193k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
193k
        unsafe {
1346
193k
            let iter = self.iter();
1347
193k
            self.drain_iter_from(iter)
1348
193k
        }
1349
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::drain
Line
Count
Source
1344
38.7k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
38.7k
        unsafe {
1346
38.7k
            let iter = self.iter();
1347
38.7k
            self.drain_iter_from(iter)
1348
38.7k
        }
1349
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::drain
Line
Count
Source
1344
38.7k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
38.7k
        unsafe {
1346
38.7k
            let iter = self.iter();
1347
38.7k
            self.drain_iter_from(iter)
1348
38.7k
        }
1349
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::drain
Line
Count
Source
1344
38.7k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
38.7k
        unsafe {
1346
38.7k
            let iter = self.iter();
1347
38.7k
            self.drain_iter_from(iter)
1348
38.7k
        }
1349
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::drain
Line
Count
Source
1344
38.7k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
38.7k
        unsafe {
1346
38.7k
            let iter = self.iter();
1347
38.7k
            self.drain_iter_from(iter)
1348
38.7k
        }
1349
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::drain
Line
Count
Source
1344
38.7k
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
38.7k
        unsafe {
1346
38.7k
            let iter = self.iter();
1347
38.7k
            self.drain_iter_from(iter)
1348
38.7k
        }
1349
38.7k
    }
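
The "without freeing the memory" behaviour of `drain` is observable through the public map API; a small sketch (capacity retention is the documented intent above, not a new guarantee being introduced here):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::new();
    map.insert(1, 10);
    map.insert(2, 20);
    let cap_before = map.capacity();

    // Draining removes every element but keeps the backing allocation,
    // so the capacity is still available for reuse afterwards.
    let drained: Vec<(u32, u32)> = map.drain().collect();
    assert_eq!(drained.len(), 2);
    assert!(map.is_empty());
    assert_eq!(map.capacity(), cap_before);
}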
1350
1351
    /// Returns an iterator which removes all elements from the table without
1352
    /// freeing the memory.
1353
    ///
1354
    /// Iteration starts at the provided iterator's current location.
1355
    ///
1356
    /// It is up to the caller to ensure that the iterator is valid for this
1357
    /// `RawTable` and covers all items that remain in the table.
1358
    #[cfg_attr(feature = "inline-more", inline)]
1359
193k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
193k
        debug_assert_eq!(iter.len(), self.len());
1361
193k
        RawDrain {
1362
193k
            iter,
1363
193k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
193k
            orig_table: NonNull::from(&mut self.table),
1365
193k
            marker: PhantomData,
1366
193k
        }
1367
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::drain_iter_from
Line
Count
Source
1359
38.7k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
38.7k
        debug_assert_eq!(iter.len(), self.len());
1361
38.7k
        RawDrain {
1362
38.7k
            iter,
1363
38.7k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
38.7k
            orig_table: NonNull::from(&mut self.table),
1365
38.7k
            marker: PhantomData,
1366
38.7k
        }
1367
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::drain_iter_from
Line
Count
Source
1359
38.7k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
38.7k
        debug_assert_eq!(iter.len(), self.len());
1361
38.7k
        RawDrain {
1362
38.7k
            iter,
1363
38.7k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
38.7k
            orig_table: NonNull::from(&mut self.table),
1365
38.7k
            marker: PhantomData,
1366
38.7k
        }
1367
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::drain_iter_from
Line
Count
Source
1359
38.7k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
38.7k
        debug_assert_eq!(iter.len(), self.len());
1361
38.7k
        RawDrain {
1362
38.7k
            iter,
1363
38.7k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
38.7k
            orig_table: NonNull::from(&mut self.table),
1365
38.7k
            marker: PhantomData,
1366
38.7k
        }
1367
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::drain_iter_from
Line
Count
Source
1359
38.7k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
38.7k
        debug_assert_eq!(iter.len(), self.len());
1361
38.7k
        RawDrain {
1362
38.7k
            iter,
1363
38.7k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
38.7k
            orig_table: NonNull::from(&mut self.table),
1365
38.7k
            marker: PhantomData,
1366
38.7k
        }
1367
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::drain_iter_from
Line
Count
Source
1359
38.7k
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
38.7k
        debug_assert_eq!(iter.len(), self.len());
1361
38.7k
        RawDrain {
1362
38.7k
            iter,
1363
38.7k
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
38.7k
            orig_table: NonNull::from(&mut self.table),
1365
38.7k
            marker: PhantomData,
1366
38.7k
        }
1367
38.7k
    }
1368
1369
    /// Returns an iterator which consumes all elements from the table.
1370
    ///
1371
    /// Iteration starts at the provided iterator's current location.
1372
    ///
1373
    /// It is up to the caller to ensure that the iterator is valid for this
1374
    /// `RawTable` and covers all items that remain in the table.
1375
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1376
        debug_assert_eq!(iter.len(), self.len());
1377
1378
        let allocation = self.into_allocation();
1379
        RawIntoIter {
1380
            iter,
1381
            allocation,
1382
            marker: PhantomData,
1383
        }
1384
    }
1385
1386
    /// Converts the table into a raw allocation. The contents of the table
1387
    /// should be dropped using a `RawIter` before freeing the allocation.
1388
    #[cfg_attr(feature = "inline-more", inline)]
1389
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1390
        let alloc = if self.table.is_empty_singleton() {
1391
            None
1392
        } else {
1393
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1394
            let (layout, ctrl_offset) =
1395
                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1396
                    Some(lco) => lco,
1397
                    None => unsafe { hint::unreachable_unchecked() },
1398
                };
1399
            Some((
1400
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) },
1401
                layout,
1402
                unsafe { ptr::read(&self.alloc) },
1403
            ))
1404
        };
1405
        mem::forget(self);
1406
        alloc
1407
    }
1408
}
1409
1410
unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1411
where
1412
    T: Send,
1413
    A: Send,
1414
{
1415
}
1416
unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1417
where
1418
    T: Sync,
1419
    A: Sync,
1420
{
1421
}
1422
1423
impl RawTableInner {
1424
    const NEW: Self = RawTableInner::new();
1425
1426
    /// Creates a new empty hash table without allocating any memory.
1427
    ///
1428
    /// In effect this returns a table with exactly 1 bucket. However we can
1429
    /// leave the data pointer dangling since that bucket is never accessed
1430
    /// due to our load factor forcing us to always have at least 1 free bucket.
1431
    #[inline]
1432
    const fn new() -> Self {
1433
        Self {
1434
            // Be careful to cast the entire slice to a raw pointer.
1435
            ctrl: unsafe {
1436
                NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast())
1437
            },
1438
            bucket_mask: 0,
1439
            items: 0,
1440
            growth_left: 0,
1441
        }
1442
    }
1443
}
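
The allocation-free empty table described above is visible from the public API as a zero capacity before the first insert; a minimal sketch:

use hashbrown::HashMap;

fn main() {
    // A freshly created map points at the static empty table, so nothing is
    // allocated and the reported capacity is zero until the first insert.
    let mut map: HashMap<u32, u32> = HashMap::new();
    assert_eq!(map.capacity(), 0);
    map.insert(1, 1);
    assert!(map.capacity() >= 1);
}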
1444
1445
impl RawTableInner {
1446
    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1447
    /// The control bytes and buckets are left uninitialized.
1448
    ///
1449
    /// # Safety
1450
    ///
1451
    /// The caller of this function must ensure that the `buckets` is power of two
1452
    /// and also initialize all control bytes of the length `self.bucket_mask + 1 +
1453
    /// Group::WIDTH` with the [`Tag::EMPTY`] bytes.
1454
    ///
1455
    /// See also [`Allocator`] API for other safety concerns.
1456
    ///
1457
    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1458
    #[cfg_attr(feature = "inline-more", inline)]
1459
193k
    unsafe fn new_uninitialized<A>(
1460
193k
        alloc: &A,
1461
193k
        table_layout: TableLayout,
1462
193k
        buckets: usize,
1463
193k
        fallibility: Fallibility,
1464
193k
    ) -> Result<Self, TryReserveError>
1465
193k
    where
1466
193k
        A: Allocator,
1467
193k
    {
1468
193k
        debug_assert!(buckets.is_power_of_two());
1469
1470
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1471
193k
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1472
193k
            Some(lco) => lco,
1473
0
            None => return Err(fallibility.capacity_overflow()),
1474
        };
1475
1476
193k
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1477
193k
            Ok(block) => block.cast(),
1478
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1479
        };
1480
1481
        // SAFETY: null pointer will be caught in above check
1482
193k
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1483
193k
        Ok(Self {
1484
193k
            ctrl,
1485
193k
            bucket_mask: buckets - 1,
1486
193k
            items: 0,
1487
193k
            growth_left: bucket_mask_to_capacity(buckets - 1),
1488
193k
        })
1489
193k
    }
1490
1491
    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1492
    /// capacity for inserting the given number of elements without reallocating.
1493
    ///
1494
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1495
    #[inline]
1496
193k
    fn fallible_with_capacity<A>(
1497
193k
        alloc: &A,
1498
193k
        table_layout: TableLayout,
1499
193k
        capacity: usize,
1500
193k
        fallibility: Fallibility,
1501
193k
    ) -> Result<Self, TryReserveError>
1502
193k
    where
1503
193k
        A: Allocator,
1504
193k
    {
1505
193k
        if capacity == 0 {
1506
0
            Ok(Self::NEW)
1507
        } else {
1508
            // SAFETY: We checked that we could successfully allocate the new table, and then
1509
            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1510
            unsafe {
1511
193k
                let buckets = capacity_to_buckets(capacity, table_layout)
1512
193k
                    .ok_or_else(|| fallibility.capacity_overflow())?;
1513
1514
193k
                let mut result =
1515
193k
                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1516
                // SAFETY: We checked that the table is allocated and therefore the table already has
1517
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1518
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1519
193k
                result.ctrl_slice().fill_empty();
1520
193k
1521
193k
                Ok(result)
1522
            }
1523
        }
1524
193k
    }
1525
1526
    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1527
    /// the given number of elements without reallocating.
1528
    ///
1529
    /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program
1530
    /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to
1531
    /// handle memory allocation failure.
1532
    ///
1533
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1534
    ///
1535
    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1536
    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1537
193k
    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1538
193k
    where
1539
193k
        A: Allocator,
1540
193k
    {
1541
193k
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1542
193k
        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1543
193k
            Ok(table_inner) => table_inner,
1544
            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1545
0
            Err(_) => unsafe { hint::unreachable_unchecked() },
1546
        }
1547
193k
    }
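
The fallible/infallible split above surfaces in the public API as `try_reserve` versus `reserve`; a brief sketch (allocation failure is not simulated here, only the two entry points are shown):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u64, u64> = HashMap::new();

    // Infallible path: panics or aborts the process if allocation fails.
    map.reserve(8);

    // Fallible path: reports allocation or capacity-overflow problems as a
    // `TryReserveError` instead of aborting.
    assert!(map.try_reserve(1024).is_ok());
}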
1548
1549
    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1550
    ///
1551
    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1552
    /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately
1553
    /// trigger a match of [`RawTableInner::find_insert_slot_in_group`] function. This is because
1554
    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1555
    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1556
    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1557
    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1558
    /// trailing control bytes (containing [`Tag::EMPTY`] bytes).
1559
    ///
1560
    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1561
    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1562
    /// `Safety`).
1563
    ///
1564
    /// # Warning
1565
    ///
1566
    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1567
    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1568
    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1569
    /// index will cause immediate [`undefined behavior`].
1570
    ///
1571
    /// # Safety
1572
    ///
1573
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1574
    /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
1575
    /// of this crate, the following rules are necessary and sufficient:
1576
    ///
1577
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1578
    ///   function results in [`undefined behavior`].
1579
    ///
1580
    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1581
    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
1582
    ///
1583
    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1584
    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1585
    ///
1586
    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1587
    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1588
    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1589
    ///
1590
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1591
    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1592
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1593
    #[inline]
1594
337k
    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1595
337k
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1596
337k
        if unlikely(self.is_bucket_full(index)) {
1597
0
            debug_assert!(self.bucket_mask < Group::WIDTH);
1598
            // SAFETY:
1599
            //
1600
            // * Since the caller of this function ensures that the control bytes are properly
1601
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1602
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1603
            //   and points to the properly initialized control bytes (see also
1604
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1605
            //
1606
            // * Because the caller of this function ensures that the index was provided by the
1607
            //   `self.find_insert_slot_in_group()` function, so for tables larger than the
1608
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1609
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1610
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1611
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1612
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1613
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1614
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1615
0
            index = Group::load_aligned(self.ctrl(0))
1616
0
                .match_empty_or_deleted()
1617
0
                .lowest_set_bit()
1618
0
                .unwrap_unchecked();
1619
337k
        }
1620
337k
        InsertSlot { index }
1621
337k
    }
1622
1623
    /// Finds the position to insert something in a group.
1624
    ///
1625
    /// **This may have false positives and must be fixed up with `fix_insert_slot`
1626
    /// before it's used.**
1627
    ///
1628
    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1629
    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1630
    #[inline]
1631
337k
    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1632
337k
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1633
337k
1634
337k
        if likely(bit.is_some()) {
1635
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1636
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1637
337k
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1638
        } else {
1639
0
            None
1640
        }
1641
337k
    }
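
The masking trick mentioned in the comment above ("same as `% self.buckets()` because the number of buckets is a power of two") checks out numerically; a tiny sketch:

fn main() {
    // For a power-of-two bucket count, `index & (buckets - 1)` is identical
    // to `index % buckets`, but compiles to a single AND.
    let buckets = 16usize;
    let bucket_mask = buckets - 1;
    for pos in [0usize, 5, 15, 16, 23, 31, 100] {
        assert_eq!(pos & bucket_mask, pos % buckets);
    }
}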
1642
1643
    /// Searches for an element in the table, or a potential slot where that element could
1644
    /// be inserted (an empty or deleted [`Bucket`] index).
1645
    ///
1646
    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1647
    /// eliminated by LLVM optimizations.
1648
    ///
1649
    /// This function does not make any changes to the `data` part of the table, or any
1650
    /// changes to the `items` or `growth_left` field of the table.
1651
    ///
1652
    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1653
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1654
    /// will never return (will go into an infinite loop) for tables larger than the group
1655
    /// width, or return an index outside of the table indices range if the table is less
1656
    /// than the group width.
1657
    ///
1658
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1659
    /// function with only `FULL` buckets' indices and return the `index` of the found
1660
    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1661
    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1662
    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
1663
    /// if this function returns [`InsertSlot`], it will contain an index in the range
1664
    /// `0..=self.buckets()`.
1665
    ///
1666
    /// # Safety
1667
    ///
1668
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1669
    /// this function results in [`undefined behavior`].
1670
    ///
1671
    /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
1672
    /// less than the group width and if there was not at least one empty or deleted bucket in
1673
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1674
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1675
    /// control bytes outside the table range.
1676
    ///
1677
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1678
    #[inline]
1679
337k
    unsafe fn find_or_find_insert_slot_inner(
1680
337k
        &self,
1681
337k
        hash: u64,
1682
337k
        eq: &mut dyn FnMut(usize) -> bool,
1683
337k
    ) -> Result<usize, InsertSlot> {
1684
337k
        let mut insert_slot = None;
1685
337k
1686
337k
        let tag_hash = Tag::full(hash);
1687
337k
        let mut probe_seq = self.probe_seq(hash);
1688
1689
        loop {
1690
            // SAFETY:
1691
            // * Caller of this function ensures that the control bytes are properly initialized.
1692
            //
1693
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1694
            //   of the table due to masking with `self.bucket_mask` and also because the number
1695
            //   of buckets is a power of two (see `self.probe_seq` function).
1696
            //
1697
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1698
            //   call `Group::load` due to the extended control bytes range, which is
1699
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1700
            //   byte will never be read for the allocated table);
1701
            //
1702
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1703
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1704
            //   bytes, which is safe (see RawTableInner::new).
1705
337k
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1706
1707
337k
            for bit in group.match_tag(tag_hash) {
1708
1.29k
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1709
1.29k
1710
1.29k
                if likely(eq(index)) {
1711
0
                    return Ok(index);
1712
1.29k
                }
1713
            }
1714
1715
            // We didn't find the element we were looking for in the group, try to get an
1716
            // insertion slot from the group if we don't have one yet.
1717
337k
            if likely(insert_slot.is_none()) {
1718
337k
                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1719
337k
            }
1720
1721
337k
            if let Some(insert_slot) = insert_slot {
1722
                // Only stop the search if the group contains at least one empty element.
1723
                // Otherwise, the element that we are looking for might be in a following group.
1724
337k
                if likely(group.match_empty().any_bit_set()) {
1725
                    // We must have found an insert slot by now, since the current group contains at
1726
                    // least one. For tables smaller than the group width, there will still be an
1727
                    // empty element in the current (and only) group due to the load factor.
1728
                    unsafe {
1729
                        // SAFETY:
1730
                        // * Caller of this function ensures that the control bytes are properly initialized.
1731
                        //
1732
                        // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1733
337k
                        return Err(self.fix_insert_slot(insert_slot));
1734
                    }
1735
4
                }
1736
0
            }
1737
1738
4
            probe_seq.move_next(self.bucket_mask);
1739
        }
1740
337k
    }
1741
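Stripped of SIMD groups and `likely` hints, the control flow above amounts to the following scalar sketch: probe forward, remember the first empty-or-deleted slot seen, and stop as soon as a group (here, a single byte) is known to contain EMPTY, because the sought element can never live past an EMPTY byte. The byte values and names are illustrative, not hashbrown's API.

const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0xFE;

fn find_or_insert_slot(
    ctrl: &[u8],               // control bytes, ctrl.len() is a power of two
    tag: u8,                   // short tag of the hash we are looking for
    start: usize,              // where the probe sequence begins
    mut eq: impl FnMut(usize) -> bool,
) -> Result<usize, usize> {
    let mask = ctrl.len() - 1;
    let mut insert_slot = None;
    for i in 0..ctrl.len() {
        let index = (start + i) & mask;
        let byte = ctrl[index];
        if byte == tag && eq(index) {
            return Ok(index); // the element itself
        }
        if insert_slot.is_none() && (byte == EMPTY || byte == DELETED) {
            insert_slot = Some(index); // first reusable slot on the probe path
        }
        if byte == EMPTY {
            // An EMPTY byte ends the probe: the element cannot be further on.
            return Err(insert_slot.unwrap());
        }
    }
    Err(insert_slot.expect("table must contain an empty or deleted slot"))
}

fn main() {
    // Slot 0 holds tag 0x21, slot 1 is a tombstone, slots 2-3 are empty.
    let ctrl = [0x21u8, DELETED, EMPTY, EMPTY];
    assert_eq!(find_or_insert_slot(&ctrl, 0x21, 0, |i| i == 0), Ok(0));
    assert_eq!(find_or_insert_slot(&ctrl, 0x33, 0, |_| false), Err(1));
}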
1742
    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1743
    /// element and sets the hash for that slot. Returns an index of that slot and the
1744
    /// old control byte stored in the found index.
1745
    ///
1746
    /// This function does not check if the given element exists in the table. Also,
1747
    /// this function does not check if there is enough space in the table to insert
1748
    /// a new element. The caller of the function must make sure that the table has at
1749
    /// least 1 empty or deleted `bucket`, otherwise this function will never return
1750
    /// (will go into an infinite loop) for tables larger than the group width, or
1751
    /// return an index outside of the table indices range if the table is less than
1752
    /// the group width.
1753
    ///
1754
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1755
    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
1756
    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
1757
    ///
1758
    /// This function does not make any changes to the `data` parts of the table,
1759
    /// or any changes to the `items` or `growth_left` field of the table.
1760
    ///
1761
    /// # Safety
1762
    ///
1763
    /// The safety rules are directly derived from the safety rules for the
1764
    /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_slot`] methods.
1765
    /// Thus, in order to uphold the safety contracts for those methods, as well as for
1766
    /// the correct logic of this crate, you must observe the following rules
1767
    /// when calling this function:
1768
    ///
1769
    /// * The [`RawTableInner`] has already been allocated and has properly initialized
1770
    ///   control bytes otherwise calling this function results in [`undefined behavior`].
1771
    ///
1772
    /// * The caller of this function must ensure that the "data" parts of the table
1773
    ///   will have an entry in the returned index (matching the given hash) right
1774
    ///   after calling this function.
1775
    ///
1776
    /// Attempting to write data at the `index` returned by this function when the table is
1777
    /// less than the group width and if there was not at least one empty or deleted bucket in
1778
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1779
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1780
    /// control bytes outside the table range.
1781
    ///
1782
    /// The caller must independently increase the `items` field of the table, and also,
1783
    /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left`
1784
    /// field, and do not change it if the old control byte was [`Tag::DELETED`].
1785
    ///
1786
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1787
    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
1788
    ///
1789
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1790
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1791
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1792
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
1793
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1794
    #[inline]
1795
0
    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, Tag) {
1796
0
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1797
0
        let index: usize = self.find_insert_slot(hash).index;
1798
0
        // SAFETY:
1799
0
        // 1. The `find_insert_slot` function either returns an `index` less than or
1800
0
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1801
0
        //    returns if it cannot find an empty or deleted slot.
1802
0
        // 2. The caller of this function guarantees that the table has already been
1803
0
        //    allocated
1804
0
        let old_ctrl = *self.ctrl(index);
1805
0
        self.set_ctrl_hash(index, hash);
1806
0
        (index, old_ctrl)
1807
0
    }
1808
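The bookkeeping described in the safety section above (always increase `items`; decrease `growth_left` only when an EMPTY byte was consumed, never for a DELETED one) looks like this in isolation. It is a sketch of the caller's obligation, not code from the crate, and the byte values are placeholders:

const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0xFE;

struct Counters {
    items: usize,
    growth_left: usize,
}

fn record_insert(c: &mut Counters, old_ctrl: u8) {
    c.items += 1;
    if old_ctrl == EMPTY {
        // Only a genuinely empty slot reduces the remaining growth budget;
        // reusing a DELETED tombstone leaves `growth_left` unchanged.
        c.growth_left -= 1;
    }
}

fn main() {
    let mut c = Counters { items: 0, growth_left: 3 };
    record_insert(&mut c, EMPTY);
    record_insert(&mut c, DELETED);
    assert_eq!((c.items, c.growth_left), (2, 2));
}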
1809
    /// Searches for an empty or deleted bucket which is suitable for inserting
1810
    /// a new element, returning the `index` for the new [`Bucket`].
1811
    ///
1812
    /// This function does not make any changes to the `data` part of the table, or any
1813
    /// changes to the `items` or `growth_left` field of the table.
1814
    ///
1815
    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1816
    /// will never return (will go into an infinite loop) for tables larger than the group
1817
    /// width, or return an index outside of the table indices range if the table is less
1818
    /// than the group width.
1819
    ///
1820
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1821
    /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
1822
    /// but in any case, if this function returns [`InsertSlot`], it will contain an index
1823
    /// in the range `0..=self.buckets()`.
1824
    ///
1825
    /// # Safety
1826
    ///
1827
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1828
    /// this function results in [`undefined behavior`].
1829
    ///
1830
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1831
    /// less than the group width and if there was not at least one empty or deleted bucket in
1832
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1833
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1834
    /// control bytes outside the table range.
1835
    ///
1836
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1837
    #[inline]
1838
0
    unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1839
0
        let mut probe_seq = self.probe_seq(hash);
1840
        loop {
1841
            // SAFETY:
1842
            // * Caller of this function ensures that the control bytes are properly initialized.
1843
            //
1844
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1845
            //   of the table due to masking with `self.bucket_mask` and also because the number
1846
            //   of buckets is a power of two (see `self.probe_seq` function).
1847
            //
1848
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1849
            //   call `Group::load` due to the extended control bytes range, which is
1850
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1851
            //   byte will never be read for the allocated table);
1852
            //
1853
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1854
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1855
            //   bytes, which is safe (see RawTableInner::new).
1856
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1857
0
1858
0
            let index = self.find_insert_slot_in_group(&group, &probe_seq);
1859
0
            if likely(index.is_some()) {
1860
                // SAFETY:
1861
                // * Caller of this function ensures that the control bytes are properly initialized.
1862
                //
1863
                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1864
                unsafe {
1865
0
                    return self.fix_insert_slot(index.unwrap_unchecked());
1866
                }
1867
0
            }
1868
0
            probe_seq.move_next(self.bucket_mask);
1869
        }
1870
0
    }
1871
1872
    /// Searches for an element in a table, returning the `index` of the found element.
1873
    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1874
    /// eliminated by LLVM optimizations.
1875
    ///
1876
    /// This function does not make any changes to the `data` part of the table, or any
1877
    /// changes to the `items` or `growth_left` field of the table.
1878
    ///
1879
    /// The table must have at least 1 empty `bucket`, otherwise, if the
1880
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
1881
    /// this function will also never return (will go into an infinite loop).
1882
    ///
1883
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1884
    /// function with only `FULL` buckets' indices and return the `index` of the found
1885
    /// element as `Some(index)`, so the index will always be in the range
1886
    /// `0..self.buckets()`.
1887
    ///
1888
    /// # Safety
1889
    ///
1890
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1891
    /// this function results in [`undefined behavior`].
1892
    ///
1893
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1894
    #[inline(always)]
1895
1.24M
    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
1896
1.24M
        let tag_hash = Tag::full(hash);
1897
1.24M
        let mut probe_seq = self.probe_seq(hash);
1898
1899
        loop {
1900
            // SAFETY:
1901
            // * Caller of this function ensures that the control bytes are properly initialized.
1902
            //
1903
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1904
            //   of the table due to masking with `self.bucket_mask`.
1905
            //
1906
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1907
            //   call `Group::load` due to the extended control bytes range, which is
1908
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1909
            //   byte will never be read for the allocated table);
1910
            //
1911
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1912
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1913
            //   bytes, which is safe (see RawTableInner::new_in).
1914
1.24M
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1915
1916
1.24M
            for bit in group.match_tag(tag_hash) {
1917
                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1918
                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1919
564k
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1920
564k
1921
564k
                if likely(eq(index)) {
1922
561k
                    return Some(index);
1923
2.45k
                }
1924
            }
1925
1926
683k
            if likely(group.match_empty().any_bit_set()) {
1927
683k
                return None;
1928
4
            }
1929
4
1930
4
            probe_seq.move_next(self.bucket_mask);
1931
        }
1932
1.24M
    }
1933
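The `Tag::full(hash)` / `self.probe_seq(hash)` pair above splits one 64-bit hash into a probe start and a short per-slot tag; `find_inner` rejects most non-matching slots on the tag alone, without ever touching the stored keys. The exact bits chosen below are an assumption for illustration, not necessarily hashbrown's:

// Probe start: low bits of the hash, wrapped to the table size.
fn h1(hash: u64, bucket_mask: usize) -> usize {
    (hash as usize) & bucket_mask
}

// Per-slot tag: a 7-bit slice of the hash, leaving the control byte's high
// bit free for the special EMPTY/DELETED markers.
fn h2(hash: u64) -> u8 {
    ((hash >> 57) as u8) & 0x7F
}

fn main() {
    let hash = 0x9E37_79B9_7F4A_7C15u64;
    let bucket_mask = 15; // a 16-bucket table
    println!("start probing at bucket {}, tag {:#04x}", h1(hash, bucket_mask), h2(hash));
}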
1934
    /// Prepares for rehashing data in place (that is, without allocating new memory).
1935
    /// Converts all full index `control bytes` to `Tag::DELETED` and all `Tag::DELETED` control
1936
    /// bytes to `Tag::EMPTY`, i.e. performs the following conversion:
1937
    ///
1938
    /// - `Tag::EMPTY` control bytes   -> `Tag::EMPTY`;
1939
    /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`;
1940
    /// - `FULL` control bytes    -> `Tag::DELETED`.
1941
    ///
1942
    /// This function does not make any changes to the `data` parts of the table,
1943
    /// or any changes to the `items` or `growth_left` field of the table.
1944
    ///
1945
    /// # Safety
1946
    ///
1947
    /// You must observe the following safety rules when calling this function:
1948
    ///
1949
    /// * The [`RawTableInner`] has already been allocated;
1950
    ///
1951
    /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL`
1952
    ///   bytes when re-inserting them into their ideal position (which was impossible
1953
    ///   to do during the first insert due to tombstones). If the caller does not do
1954
    ///   this, then calling this function may result in a memory leak.
1955
    ///
1956
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1957
    ///   calling this function results in [`undefined behavior`].
1958
    ///
1959
    /// Calling this function on a table that has not been allocated results in
1960
    /// [`undefined behavior`].
1961
    ///
1962
    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
1963
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
1964
    ///
1965
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1966
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1967
    #[allow(clippy::mut_mut)]
1968
    #[inline]
1969
0
    unsafe fn prepare_rehash_in_place(&mut self) {
1970
        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1971
        // This effectively frees up all buckets containing a DELETED entry.
1972
        //
1973
        // SAFETY:
1974
        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1975
        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1976
        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1977
        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1978
        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1979
        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1980
0
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1981
0
            let group = Group::load_aligned(self.ctrl(i));
1982
0
            let group = group.convert_special_to_empty_and_full_to_deleted();
1983
0
            group.store_aligned(self.ctrl(i));
1984
0
        }
1985
1986
        // Fix up the trailing control bytes. See the comments in set_ctrl
1987
        // for the handling of tables smaller than the group width.
1988
        //
1989
        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1990
        // has already been allocated
1991
0
        if unlikely(self.buckets() < Group::WIDTH) {
1992
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1993
0
            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1994
0
            // `Group::WIDTH` is safe
1995
0
            self.ctrl(0)
1996
0
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1997
0
        } else {
1998
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
1999
0
            // control bytes,so copying `Group::WIDTH` bytes with offset equal
2000
0
            // to `self.buckets() == self.bucket_mask + 1` is safe
2001
0
            self.ctrl(0)
2002
0
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2003
0
        }
2004
0
    }
2005
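A scalar version of the bulk conversion above, assuming the SwissTable convention that full control bytes have the high bit clear and special bytes (EMPTY, DELETED) have it set; hashbrown applies the same mapping a whole `Group` at a time. The concrete byte values are placeholders:

const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0xFE;

// EMPTY -> EMPTY, DELETED -> EMPTY, FULL -> DELETED
fn convert(byte: u8) -> u8 {
    if byte & 0x80 != 0 { EMPTY } else { DELETED }
}

fn main() {
    assert_eq!(convert(EMPTY), EMPTY);
    assert_eq!(convert(DELETED), EMPTY);
    assert_eq!(convert(0x2A), DELETED); // a full slot becomes a tombstone to re-insert
}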
2006
    /// Returns an iterator over every element in the table.
2007
    ///
2008
    /// # Safety
2009
    ///
2010
    /// If any of the following conditions are violated, the result
2011
    /// is [`undefined behavior`]:
2012
    ///
2013
    /// * The caller has to ensure that the `RawTableInner` outlives the
2014
    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2015
    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2016
    ///
2017
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2018
    ///
2019
    /// The type `T` must be the actual type of the elements stored in the table,
2020
    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2021
    ///
2022
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2023
    #[inline]
2024
193k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
193k
        // SAFETY:
2026
193k
        // 1. Since the caller of this function ensures that the control bytes
2027
193k
        //    are properly initialized and `self.data_end()` points to the start
2028
193k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
193k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
193k
        //    control bytes.
2031
193k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
193k
        //    equal to zero).
2033
193k
        // 3. We pass the exact value of buckets of the table to the function.
2034
193k
        //
2035
193k
        //                         `ctrl` points here (to the start
2036
193k
        //                         of the first control byte `CT0`)
2037
193k
        //                          ∨
2038
193k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
193k
        //                           \________  ________/
2040
193k
        //                                    \/
2041
193k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
193k
        //
2043
193k
        // where: T0...T_n  - our stored data;
2044
193k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
193k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
193k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
193k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
193k
        //                        `RawTableInner::set_ctrl` function.
2049
193k
        //
2050
193k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
193k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
193k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
193k
        RawIter {
2054
193k
            // SAFETY: See explanation above
2055
193k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
193k
            items: self.items,
2057
193k
        }
2058
193k
    }
<hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2024
38.7k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
38.7k
        // SAFETY:
2026
38.7k
        // 1. Since the caller of this function ensures that the control bytes
2027
38.7k
        //    are properly initialized and `self.data_end()` points to the start
2028
38.7k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
38.7k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
38.7k
        //    control bytes.
2031
38.7k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
38.7k
        //    equal to zero).
2033
38.7k
        // 3. We pass the exact value of buckets of the table to the function.
2034
38.7k
        //
2035
38.7k
        //                         `ctrl` points here (to the start
2036
38.7k
        //                         of the first control byte `CT0`)
2037
38.7k
        //                          ∨
2038
38.7k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
38.7k
        //                           \________  ________/
2040
38.7k
        //                                    \/
2041
38.7k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
38.7k
        //
2043
38.7k
        // where: T0...T_n  - our stored data;
2044
38.7k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
38.7k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
38.7k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
38.7k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
38.7k
        //                        `RawTableInner::set_ctrl` function.
2049
38.7k
        //
2050
38.7k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
38.7k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
38.7k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
38.7k
        RawIter {
2054
38.7k
            // SAFETY: See explanation above
2055
38.7k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
38.7k
            items: self.items,
2057
38.7k
        }
2058
38.7k
    }
<hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2024
38.7k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
38.7k
        // SAFETY:
2026
38.7k
        // 1. Since the caller of this function ensures that the control bytes
2027
38.7k
        //    are properly initialized and `self.data_end()` points to the start
2028
38.7k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
38.7k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
38.7k
        //    control bytes.
2031
38.7k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
38.7k
        //    equal to zero).
2033
38.7k
        // 3. We pass the exact value of buckets of the table to the function.
2034
38.7k
        //
2035
38.7k
        //                         `ctrl` points here (to the start
2036
38.7k
        //                         of the first control byte `CT0`)
2037
38.7k
        //                          ∨
2038
38.7k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
38.7k
        //                           \________  ________/
2040
38.7k
        //                                    \/
2041
38.7k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
38.7k
        //
2043
38.7k
        // where: T0...T_n  - our stored data;
2044
38.7k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
38.7k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
38.7k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
38.7k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
38.7k
        //                        `RawTableInner::set_ctrl` function.
2049
38.7k
        //
2050
38.7k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
38.7k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
38.7k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
38.7k
        RawIter {
2054
38.7k
            // SAFETY: See explanation above
2055
38.7k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
38.7k
            items: self.items,
2057
38.7k
        }
2058
38.7k
    }
<hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>
Line
Count
Source
2024
38.7k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
38.7k
        // SAFETY:
2026
38.7k
        // 1. Since the caller of this function ensures that the control bytes
2027
38.7k
        //    are properly initialized and `self.data_end()` points to the start
2028
38.7k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
38.7k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
38.7k
        //    control bytes.
2031
38.7k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
38.7k
        //    equal to zero).
2033
38.7k
        // 3. We pass the exact value of buckets of the table to the function.
2034
38.7k
        //
2035
38.7k
        //                         `ctrl` points here (to the start
2036
38.7k
        //                         of the first control byte `CT0`)
2037
38.7k
        //                          ∨
2038
38.7k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
38.7k
        //                           \________  ________/
2040
38.7k
        //                                    \/
2041
38.7k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
38.7k
        //
2043
38.7k
        // where: T0...T_n  - our stored data;
2044
38.7k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
38.7k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
38.7k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
38.7k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
38.7k
        //                        `RawTableInner::set_ctrl` function.
2049
38.7k
        //
2050
38.7k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
38.7k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
38.7k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
38.7k
        RawIter {
2054
38.7k
            // SAFETY: See explanation above
2055
38.7k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
38.7k
            items: self.items,
2057
38.7k
        }
2058
38.7k
    }
<hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>
Line
Count
Source
2024
38.7k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
38.7k
        // SAFETY:
2026
38.7k
        // 1. Since the caller of this function ensures that the control bytes
2027
38.7k
        //    are properly initialized and `self.data_end()` points to the start
2028
38.7k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
38.7k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
38.7k
        //    control bytes.
2031
38.7k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
38.7k
        //    equal to zero).
2033
38.7k
        // 3. We pass the exact value of buckets of the table to the function.
2034
38.7k
        //
2035
38.7k
        //                         `ctrl` points here (to the start
2036
38.7k
        //                         of the first control byte `CT0`)
2037
38.7k
        //                          ∨
2038
38.7k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
38.7k
        //                           \________  ________/
2040
38.7k
        //                                    \/
2041
38.7k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
38.7k
        //
2043
38.7k
        // where: T0...T_n  - our stored data;
2044
38.7k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
38.7k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
38.7k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
38.7k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
38.7k
        //                        `RawTableInner::set_ctrl` function.
2049
38.7k
        //
2050
38.7k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
38.7k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
38.7k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
38.7k
        RawIter {
2054
38.7k
            // SAFETY: See explanation above
2055
38.7k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
38.7k
            items: self.items,
2057
38.7k
        }
2058
38.7k
    }
<hashbrown::raw::RawTableInner>::iter::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2024
38.7k
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
38.7k
        // SAFETY:
2026
38.7k
        // 1. Since the caller of this function ensures that the control bytes
2027
38.7k
        //    are properly initialized and `self.data_end()` points to the start
2028
38.7k
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
38.7k
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
38.7k
        //    control bytes.
2031
38.7k
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
38.7k
        //    equal to zero).
2033
38.7k
        // 3. We pass the exact value of buckets of the table to the function.
2034
38.7k
        //
2035
38.7k
        //                         `ctrl` points here (to the start
2036
38.7k
        //                         of the first control byte `CT0`)
2037
38.7k
        //                          ∨
2038
38.7k
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
38.7k
        //                           \________  ________/
2040
38.7k
        //                                    \/
2041
38.7k
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
38.7k
        //
2043
38.7k
        // where: T0...T_n  - our stored data;
2044
38.7k
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
38.7k
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
38.7k
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
38.7k
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
38.7k
        //                        `RawTableInner::set_ctrl` function.
2049
38.7k
        //
2050
38.7k
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
38.7k
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
38.7k
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
38.7k
        RawIter {
2054
38.7k
            // SAFETY: See explanation above
2055
38.7k
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
38.7k
            items: self.items,
2057
38.7k
        }
2058
38.7k
    }
2059
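The layout diagram above is what `RawIter` walks: it scans the control bytes and yields only the FULL slots. A byte-at-a-time stand-in (hashbrown does this group-wise, and the byte values here are placeholders):

const EMPTY: u8 = 0xFF;

// Yield the indices of FULL slots, i.e. control bytes with the high bit clear.
fn full_indices(ctrl: &[u8]) -> impl Iterator<Item = usize> + '_ {
    ctrl.iter()
        .enumerate()
        .filter(|(_, &b)| b & 0x80 == 0)
        .map(|(i, _)| i)
}

fn main() {
    let ctrl = [0x11u8, EMPTY, 0x42, EMPTY];
    assert_eq!(full_indices(&ctrl).collect::<Vec<_>>(), vec![0, 2]);
}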
2060
    /// Executes the destructors (if any) of the values stored in the table.
2061
    ///
2062
    /// # Note
2063
    ///
2064
    /// This function does not erase the control bytes of the table and does
2065
    /// not make any changes to the `items` or `growth_left` fields of the
2066
    /// table. If necessary, the caller of this function must manually set
2067
    /// up these table fields, for example using the [`clear_no_drop`] function.
2068
    ///
2069
    /// Be careful when calling this function, because the drop function of
2070
    /// the elements can panic, and this can leave the table in an inconsistent
2071
    /// state.
2072
    ///
2073
    /// # Safety
2074
    ///
2075
    /// The type `T` must be the actual type of the elements stored in the table,
2076
    /// otherwise calling this function may result in [`undefined behavior`].
2077
    ///
2078
    /// If `T` is a type that should be dropped and **the table is not empty**,
2079
    /// calling this function more than once results in [`undefined behavior`].
2080
    ///
2081
    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2082
    /// calling this function may result in [`undefined behavior`].
2083
    ///
2084
    /// It is safe to call this function on a table that has not been allocated,
2085
    /// on a table with uninitialized control bytes, and on a table with no actual
2086
    /// data but with `Full` control bytes if `self.items == 0`.
2087
    ///
2088
    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2089
    /// about properly removing or saving `element` from / into the [`RawTable`] /
2090
    /// [`RawTableInner`].
2091
    ///
2092
    /// [`Bucket::drop`]: Bucket::drop
2093
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2094
    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2095
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2096
193k
    unsafe fn drop_elements<T>(&mut self) {
2097
193k
        // Check that `self.items != 0`. Protects against the possibility
2098
193k
        // of creating an iterator on a table with uninitialized control bytes.
2099
193k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
193k
        }
2109
193k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2096
38.7k
    unsafe fn drop_elements<T>(&mut self) {
2097
38.7k
        // Check that `self.items != 0`. Protects against the possibility
2098
38.7k
        // of creating an iterator on a table with uninitialized control bytes.
2099
38.7k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
38.7k
        }
2109
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2096
38.7k
    unsafe fn drop_elements<T>(&mut self) {
2097
38.7k
        // Check that `self.items != 0`. Protects against the possibility
2098
38.7k
        // of creating an iterator on a table with uninitialized control bytes.
2099
38.7k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
38.7k
        }
2109
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>
Line
Count
Source
2096
38.7k
    unsafe fn drop_elements<T>(&mut self) {
2097
38.7k
        // Check that `self.items != 0`. Protects against the possibility
2098
38.7k
        // of creating an iterator on a table with uninitialized control bytes.
2099
38.7k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
38.7k
        }
2109
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>
Line
Count
Source
2096
38.7k
    unsafe fn drop_elements<T>(&mut self) {
2097
38.7k
        // Check that `self.items != 0`. Protects against the possibility
2098
38.7k
        // of creating an iterator on a table with uninitialized control bytes.
2099
38.7k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
38.7k
        }
2109
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2096
38.7k
    unsafe fn drop_elements<T>(&mut self) {
2097
38.7k
        // Check that `self.items != 0`. Protects against the possibility
2098
38.7k
        // of creating an iterator on a table with uninitialized control bytes.
2099
38.7k
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
38.7k
        }
2109
38.7k
    }
2110
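The `T::NEEDS_DROP && self.items != 0` guard above lets the compiler remove the whole drop loop for element types without destructors; `core::mem::needs_drop` exposes the same property, and the associated constant is presumably derived from it:

use core::mem;

fn main() {
    // A table of Copy data such as u64 can skip dropping entirely;
    // anything owning heap memory cannot.
    assert!(!mem::needs_drop::<u64>());
    assert!(mem::needs_drop::<String>());
    assert!(mem::needs_drop::<(u64, String)>());
}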
2111
    /// Executes the destructors (if any) of the values stored in the table and then
2112
    /// deallocates the table.
2113
    ///
2114
    /// # Note
2115
    ///
2116
    /// Calling this function automatically makes invalid (dangling) all instances of
2117
    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2118
    ///
2119
    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2120
    /// fields of the table. If necessary, the caller of this function must manually set
2121
    /// up these table fields.
2122
    ///
2123
    /// # Safety
2124
    ///
2125
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2126
    ///
2127
    /// * Calling this function more than once;
2128
    ///
2129
    /// * The type `T` must be the actual type of the elements stored in the table.
2130
    ///
2131
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2132
    ///   to allocate this table.
2133
    ///
2134
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2135
    ///   was used to allocate this table.
2136
    ///
2137
    /// The caller of this function should pay attention to the possibility of the
2138
    /// elements' drop function panicking, because this:
2139
    ///
2140
    ///    * May leave the table in an inconsistent state;
2141
    ///
2142
    ///    * Memory is never deallocated, so a memory leak may occur.
2143
    ///
2144
    /// Attempting to use the `ctrl` field of the table (dereference) after calling this
2145
    /// function results in [`undefined behavior`].
2146
    ///
2147
    /// It is safe to call this function on a table that has not been allocated,
2148
    /// on a table with uninitialized control bytes, and on a table with no actual
2149
    /// data but with `Full` control bytes if `self.items == 0`.
2150
    ///
2151
    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2152
    /// for more information.
2153
    ///
2154
    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2155
    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2156
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2157
193k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
193k
        if !self.is_empty_singleton() {
2159
193k
            unsafe {
2160
193k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
193k
                self.drop_elements::<T>();
2162
193k
                // SAFETY:
2163
193k
                // 1. We have checked that our table is allocated.
2164
193k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
193k
                self.free_buckets(alloc, table_layout);
2166
193k
            }
2167
0
        }
2168
193k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>), allocator_api2::stable::alloc::global::Global>
Line
Count
Source
2157
38.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
38.7k
        if !self.is_empty_singleton() {
2159
38.7k
            unsafe {
2160
38.7k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
38.7k
                self.drop_elements::<T>();
2162
38.7k
                // SAFETY:
2163
38.7k
                // 1. We have checked that our table is allocated.
2164
38.7k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
38.7k
                self.free_buckets(alloc, table_layout);
2166
38.7k
            }
2167
0
        }
2168
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>), allocator_api2::stable::alloc::global::Global>
Line
Count
Source
2157
38.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
38.7k
        if !self.is_empty_singleton() {
2159
38.7k
            unsafe {
2160
38.7k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
38.7k
                self.drop_elements::<T>();
2162
38.7k
                // SAFETY:
2163
38.7k
                // 1. We have checked that our table is allocated.
2164
38.7k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
38.7k
                self.free_buckets(alloc, table_layout);
2166
38.7k
            }
2167
0
        }
2168
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>), allocator_api2::stable::alloc::global::Global>
Line
Count
Source
2157
38.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
38.7k
        if !self.is_empty_singleton() {
2159
38.7k
            unsafe {
2160
38.7k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
38.7k
                self.drop_elements::<T>();
2162
38.7k
                // SAFETY:
2163
38.7k
                // 1. We have checked that our table is allocated.
2164
38.7k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
38.7k
                self.free_buckets(alloc, table_layout);
2166
38.7k
            }
2167
0
        }
2168
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>), allocator_api2::stable::alloc::global::Global>
Line
Count
Source
2157
38.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
38.7k
        if !self.is_empty_singleton() {
2159
38.7k
            unsafe {
2160
38.7k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
38.7k
                self.drop_elements::<T>();
2162
38.7k
                // SAFETY:
2163
38.7k
                // 1. We have checked that our table is allocated.
2164
38.7k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
38.7k
                self.free_buckets(alloc, table_layout);
2166
38.7k
            }
2167
0
        }
2168
38.7k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>), allocator_api2::stable::alloc::global::Global>
Line
Count
Source
2157
38.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
38.7k
        if !self.is_empty_singleton() {
2159
38.7k
            unsafe {
2160
38.7k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
38.7k
                self.drop_elements::<T>();
2162
38.7k
                // SAFETY:
2163
38.7k
                // 1. We have checked that our table is allocated.
2164
38.7k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
38.7k
                self.free_buckets(alloc, table_layout);
2166
38.7k
            }
2167
0
        }
2168
38.7k
    }
2169
2170
    /// Returns a pointer to an element in the table (convenience for
2171
    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2172
    ///
2173
    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2174
    /// otherwise using it may result in [`undefined behavior`].
2175
    ///
2176
    /// # Safety
2177
    ///
2178
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2179
    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2180
    /// this function, the following safety rules must be observed:
2181
    ///
2182
    /// * The table must already be allocated;
2183
    ///
2184
    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2185
    ///   function, i.e. `(index + 1) <= self.buckets()`.
2186
    ///
2187
    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2188
    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2189
    ///
2190
    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
2191
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2192
    ///
2193
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2194
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
2195
    /// `(index + 1) <= self.buckets()`.
2196
    ///
2197
    /// ```none
2198
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2199
    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2200
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2201
    ///
2202
    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2203
    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2204
    ///                  |
2205
    ///                  |               `base = table.data_end::<T>()` points here
2206
    ///                  |               (to the start of CT0 or to the end of T0)
2207
    ///                  v                 v
2208
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2209
    ///                     ^                                              \__________  __________/
2210
    ///        `table.bucket(3)` returns a pointer that points                        \/
2211
    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2212
    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2213
    ///
2214
    /// where: T0...T_n  - our stored data;
2215
    ///        CT0...CT_n - control bytes or metadata for `data`;
2216
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2217
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2218
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2219
    ///
2220
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2221
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2222
    /// ```
2223
    ///
2224
    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2225
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2226
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2227
    #[inline]
2228
0
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2229
0
        debug_assert_ne!(self.bucket_mask, 0);
2230
0
        debug_assert!(index < self.buckets());
2231
0
        Bucket::from_base_index(self.data_end(), index)
2232
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>
2233
2234
    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2235
    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2236
    ///
2237
    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2238
    /// otherwise using it may result in [`undefined behavior`].
2239
    ///
2240
    /// # Safety
2241
    ///
2242
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2243
    ///
2244
    /// * The table must already be allocated;
2245
    ///
2246
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2247
    ///   function, i.e. `(index + 1) <= self.buckets()`;
2248
    ///
2249
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2250
    ///
2251
    /// ```none
2252
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2253
    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2254
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2255
    ///
2256
    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2257
    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2258
    ///                  |
2259
    ///                  |               `base = table.data_end::<u8>()` points here
2260
    ///                  |               (to the start of CT0 or to the end of T0)
2261
    ///                  v                 v
2262
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2263
    ///                                                                    \__________  __________/
2264
    ///                                                                               \/
2265
    ///                                                                    additional control bytes
2266
    ///                                                                     `m = Group::WIDTH - 1`
2267
    ///
2268
    /// where: T0...T_n  - our stored data;
2269
    ///        CT0...CT_n - control bytes or metadata for `data`;
2270
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2271
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2272
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2273
    ///
2274
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2275
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2276
    /// ```
2277
    ///
2278
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2279
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2280
    #[inline]
2281
0
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2282
0
        debug_assert_ne!(self.bucket_mask, 0);
2283
0
        debug_assert!(index < self.buckets());
2284
0
        let base: *mut u8 = self.data_end().as_ptr();
2285
0
        base.sub((index + 1) * size_of)
2286
0
    }
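The pointer arithmetic above counts buckets backwards from the end of the data region, as in the layout diagrams in the doc comments. The following standalone sketch (a plain array instead of a real table allocation, nothing from hashbrown's API) shows the same `base.sub((index + 1) * size_of)` idea, expressed in units of whole elements:

// Sketch of the reverse-indexed bucket layout: elements are laid out as
// T3, T2, T1, T0 in memory and bucket `i` starts `i + 1` elements below
// the end of the data region (the role played by `data_end`).
fn main() {
    let data: [u32; 4] = [30, 20, 10, 0]; // memory order: T3, T2, T1, T0
    let data_end = unsafe { data.as_ptr().add(data.len()) }; // one past T0
    for index in 0..data.len() {
        // Same arithmetic as `bucket_ptr`: base.sub((index + 1) * size_of),
        // here with the element size folded into the typed pointer offset.
        let bucket = unsafe { data_end.sub(index + 1) };
        println!("bucket({index}) starts at value {}", unsafe { *bucket });
    }
}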
2287
2288
    /// Returns a pointer to one past the last `data` element in the table as viewed from
2289
    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2290
    ///
2291
    /// This function actually returns a pointer to the end of the `data element` at
2292
    /// index "0" (zero).
2293
    ///
2294
    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2295
    /// otherwise using it may result in [`undefined behavior`].
2296
    ///
2297
    /// # Note
2298
    ///
2299
    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2300
    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2301
    ///
2302
    /// ```none
2303
    ///                        `table.data_end::<T>()` returns pointer that points here
2304
    ///                        (to the end of `T0`)
2305
    ///                          ∨
2306
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2307
    ///                           \________  ________/
2308
    ///                                    \/
2309
    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2310
    ///
2311
    /// where: T0...T_n  - our stored data;
2312
    ///        CT0...CT_n - control bytes or metadata for `data`.
2313
    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2314
    ///                        with loading `Group` bytes from the heap works properly, even if the result
2315
    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2316
    ///                        `RawTableInner::set_ctrl` function.
2317
    ///
2318
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2319
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2320
    /// ```
2321
    ///
2322
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2323
    #[inline]
2324
193k
    fn data_end<T>(&self) -> NonNull<T> {
2325
193k
        self.ctrl.cast()
2326
193k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<u8>
<hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2324
38.7k
    fn data_end<T>(&self) -> NonNull<T> {
2325
38.7k
        self.ctrl.cast()
2326
38.7k
    }
<hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2324
38.7k
    fn data_end<T>(&self) -> NonNull<T> {
2325
38.7k
        self.ctrl.cast()
2326
38.7k
    }
<hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>
Line
Count
Source
2324
38.7k
    fn data_end<T>(&self) -> NonNull<T> {
2325
38.7k
        self.ctrl.cast()
2326
38.7k
    }
<hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>
Line
Count
Source
2324
38.7k
    fn data_end<T>(&self) -> NonNull<T> {
2325
38.7k
        self.ctrl.cast()
2326
38.7k
    }
<hashbrown::raw::RawTableInner>::data_end::<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>
Line
Count
Source
2324
38.7k
    fn data_end<T>(&self) -> NonNull<T> {
2325
38.7k
        self.ctrl.cast()
2326
38.7k
    }
2327
2328
    /// Returns an iterator-like object for a probe sequence on the table.
2329
    ///
2330
    /// This iterator never terminates, but is guaranteed to visit each bucket
2331
    /// group exactly once. The loop using `probe_seq` must terminate upon
2332
    /// reaching a group containing an empty bucket.
2333
    #[inline]
2334
1.58M
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2335
1.58M
        ProbeSeq {
2336
1.58M
            // This is the same as `hash as usize % self.buckets()` because the number
2337
1.58M
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2338
1.58M
            pos: h1(hash) & self.bucket_mask,
2339
1.58M
            stride: 0,
2340
1.58M
        }
2341
1.58M
    }
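The comment in `probe_seq` leans on the bucket count being a power of two; a tiny standalone check (made-up hash values, no hashbrown types) confirms that masking with `bucket_mask` and taking the remainder agree:

// Sketch: for a power-of-two bucket count, `hash & (buckets - 1)`
// equals `hash % buckets`, which is what `probe_seq` relies on.
fn main() {
    let buckets: usize = 8;        // always a power of two in hashbrown
    let bucket_mask = buckets - 1; // == 0b111
    for hash in [0u64, 7, 8, 12345, u64::MAX] {
        let masked = (hash as usize) & bucket_mask;
        let modded = (hash as usize) % buckets;
        assert_eq!(masked, modded);
        println!("hash = {hash:>20} -> start bucket {masked}");
    }
}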
2342
2343
    #[inline]
2344
337k
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, hash: u64) {
2345
337k
        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2346
337k
        self.set_ctrl_hash(index, hash);
2347
337k
        self.items += 1;
2348
337k
    }
2349
2350
    #[inline]
2351
0
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2352
0
        let probe_seq_pos = self.probe_seq(hash).pos;
2353
0
        let probe_index =
2354
0
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
2355
0
        probe_index(i) == probe_index(new_i)
2356
0
    }
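For reference, here is a small sketch of the same group-index arithmetic with an assumed group width of 4 (the real `Group::WIDTH` depends on the SIMD backend): positions are measured from the start of the probe sequence and divided into groups.

// Sketch of the arithmetic used by `is_in_same_group`.
const GROUP_WIDTH: usize = 4; // assumed width, not the real SIMD group size

fn probe_group(pos: usize, probe_start: usize, bucket_mask: usize) -> usize {
    (pos.wrapping_sub(probe_start) & bucket_mask) / GROUP_WIDTH
}

fn main() {
    let bucket_mask = 15; // 16 buckets
    // Buckets 5 and 6 fall in the same group of a probe starting at 4...
    assert_eq!(probe_group(5, 4, bucket_mask), probe_group(6, 4, bucket_mask));
    // ...but bucket 9 belongs to the next group of that probe sequence.
    assert_ne!(probe_group(5, 4, bucket_mask), probe_group(9, 4, bucket_mask));
    println!("group index checks passed");
}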
2357
2358
    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2359
    /// the end of the array.
2360
    ///
2361
    /// This function does not make any changes to the `data` parts of the table,
2362
    /// or any changes to the `items` or `growth_left` field of the table.
2363
    ///
2364
    /// # Safety
2365
    ///
2366
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2367
    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2368
    /// following rules when calling this function:
2369
    ///
2370
    /// * The [`RawTableInner`] has already been allocated;
2371
    ///
2372
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2373
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2374
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2375
    ///
2376
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2377
    ///
2378
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2379
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2380
    ///
2381
    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2382
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2383
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2384
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2385
    #[inline]
2386
337k
    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2387
337k
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2388
337k
        self.set_ctrl(index, Tag::full(hash));
2389
337k
    }
2390
2391
    /// Replaces the hash in the control byte at the given index with the provided one,
2392
    /// and possibly also replicates the new control byte at the end of the array of control
2393
    /// bytes, returning the old control byte.
2394
    ///
2395
    /// This function does not make any changes to the `data` parts of the table,
2396
    /// or any changes to the `items` or `growth_left` field of the table.
2397
    ///
2398
    /// # Safety
2399
    ///
2400
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`]
2401
    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2402
    /// methods, you must observe the following rules when calling this function:
2403
    ///
2404
    /// * The [`RawTableInner`] has already been allocated;
2405
    ///
2406
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2407
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2408
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2409
    ///
2410
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2411
    ///
2412
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2413
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2414
    ///
2415
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
2416
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2417
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2418
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2419
    #[inline]
2420
0
    unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag {
2421
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`]
2422
0
        let prev_ctrl = *self.ctrl(index);
2423
0
        self.set_ctrl_hash(index, hash);
2424
0
        prev_ctrl
2425
0
    }
2426
2427
    /// Sets a control byte, and possibly also the replicated control byte at
2428
    /// the end of the array.
2429
    ///
2430
    /// This function does not make any changes to the `data` parts of the table,
2431
    /// or any changes to the `items` or `growth_left` field of the table.
2432
    ///
2433
    /// # Safety
2434
    ///
2435
    /// You must observe the following safety rules when calling this function:
2436
    ///
2437
    /// * The [`RawTableInner`] has already been allocated;
2438
    ///
2439
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2440
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2441
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2442
    ///
2443
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2444
    ///
2445
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2446
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2447
    ///
2448
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2449
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2450
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2451
    #[inline]
2452
505k
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2453
505k
        // Replicate the first Group::WIDTH control bytes at the end of
2454
505k
        // the array without using a branch. If the table is smaller than
2455
505k
        // the group width (self.buckets() < Group::WIDTH),
2456
505k
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2457
505k
        //
2458
505k
        // - If index >= Group::WIDTH then index == index2.
2459
505k
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2460
505k
        //
2461
505k
        // The very last replicated control byte is never actually read because
2462
505k
        // we mask the initial index for unaligned loads, but we write it
2463
505k
        // anyway because it makes the set_ctrl implementation simpler.
2464
505k
        //
2465
505k
        // If there are fewer buckets than Group::WIDTH then this code will
2466
505k
        // replicate the buckets at the end of the trailing group. For example
2467
505k
        // with 2 buckets and a group size of 4, the control bytes will look
2468
505k
        // like this:
2469
505k
        //
2470
505k
        //     Real    |             Replicated
2471
505k
        // ---------------------------------------------
2472
505k
        // | [A] | [B] | [Tag::EMPTY] | [Tag::EMPTY] | [A] | [B] |
2473
505k
        // ---------------------------------------------
2474
505k
2475
505k
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2476
505k
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2477
505k
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2478
505k
2479
505k
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2480
505k
        *self.ctrl(index) = ctrl;
2481
505k
        *self.ctrl(index2) = ctrl;
2482
505k
    }
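The mirrored index computed above can be checked with a small standalone sketch (an assumed group width of 4, not the real `Group::WIDTH`), reproducing the two cases described in the comment:

// Sketch of the mirrored control-byte index used by `set_ctrl`
// (assumed group width of 4; the real width is SIMD-dependent).
const GROUP_WIDTH: usize = 4;

fn mirror_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // 2 buckets (bucket_mask = 1): smaller than the group width, so every
    // index is replicated into the trailing group: index2 = GROUP_WIDTH + index.
    assert_eq!(mirror_index(0, 1), 4);
    assert_eq!(mirror_index(1, 1), 5);

    // 8 buckets (bucket_mask = 7): only indices below GROUP_WIDTH get a real
    // mirror (bucket_mask + 1 + index); larger indices map onto themselves.
    assert_eq!(mirror_index(1, 7), 9);
    assert_eq!(mirror_index(5, 7), 5);
    println!("mirror index checks passed");
}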
2483
2484
    /// Returns a pointer to a control byte.
2485
    ///
2486
    /// # Safety
2487
    ///
2488
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2489
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2490
    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2491
    /// will return a pointer to the end of the allocated table, which is useless on its own.
2492
    ///
2493
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2494
    /// table that has not been allocated results in [`Undefined Behavior`].
2495
    ///
2496
    /// So to satisfy both requirements you should always follow the rule that
2497
    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2498
    ///
2499
    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2500
    /// for read-only purposes.
2501
    ///
2502
    /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
2503
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2504
    ///
2505
    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2506
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2507
    #[inline]
2508
3.60M
    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2509
3.60M
        debug_assert!(index < self.num_ctrl_bytes());
2510
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2511
3.60M
        self.ctrl.as_ptr().add(index).cast()
2512
3.60M
    }
2513
2514
    /// Gets the slice of all control bytes.
2515
387k
    fn ctrl_slice(&mut self) -> &mut [Tag] {
2516
387k
        // SAFETY: We've initialized all control bytes, and have the correct number.
2517
387k
        unsafe { slice::from_raw_parts_mut(self.ctrl.as_ptr().cast(), self.num_ctrl_bytes()) }
2518
387k
    }
2519
2520
    #[inline]
2521
387k
    fn buckets(&self) -> usize {
2522
387k
        self.bucket_mask + 1
2523
387k
    }
2524
2525
    /// Checks whether the bucket at `index` is full.
2526
    ///
2527
    /// # Safety
2528
    ///
2529
    /// The caller must ensure `index` is less than the number of buckets.
2530
    #[inline]
2531
337k
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2532
337k
        debug_assert!(index < self.buckets());
2533
337k
        (*self.ctrl(index)).is_full()
2534
337k
    }
2535
2536
    #[inline]
2537
387k
    fn num_ctrl_bytes(&self) -> usize {
2538
387k
        self.bucket_mask + 1 + Group::WIDTH
2539
387k
    }
2540
2541
    #[inline]
2542
387k
    fn is_empty_singleton(&self) -> bool {
2543
387k
        self.bucket_mask == 0
2544
387k
    }
2545
2546
    /// Attempts to allocate a new hash table with at least enough capacity
2547
    /// for inserting the given number of elements without reallocating,
2548
    /// and return it inside a `ScopeGuard` to protect against a panic in the hash
2549
    /// function.
2550
    ///
2551
    /// # Note
2552
    ///
2553
    /// It is recommended (but not required):
2554
    ///
2555
    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2556
    ///
2557
    /// * The `alloc` is the same [`Allocator`] as the `Allocator` used
2558
    ///   to allocate this table.
2559
    ///
2560
    /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used
2561
    ///   to allocate this table.
2562
    ///
2563
    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2564
    /// this table, then using `mem::swap` with the `self` and the new table returned
2565
    /// by this function results in [`undefined behavior`].
2566
    ///
2567
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2568
    #[allow(clippy::mut_mut)]
2569
    #[inline]
2570
0
    fn prepare_resize<'a, A>(
2571
0
        &self,
2572
0
        alloc: &'a A,
2573
0
        table_layout: TableLayout,
2574
0
        capacity: usize,
2575
0
        fallibility: Fallibility,
2576
0
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2577
0
    where
2578
0
        A: Allocator,
2579
0
    {
2580
0
        debug_assert!(self.items <= capacity);
2581
2582
        // Allocate and initialize the new table.
2583
0
        let new_table =
2584
0
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2585
2586
        // The hash function may panic, in which case we simply free the new
2587
        // table without dropping any elements that may have been copied into
2588
        // it.
2589
        //
2590
        // This guard is also used to free the old table on success, see
2591
        // the comment at the bottom of this function.
2592
0
        Ok(guard(new_table, move |self_| {
2593
0
            if !self_.is_empty_singleton() {
2594
0
                // SAFETY:
2595
0
                // 1. We have checked that our table is allocated.
2596
0
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2597
0
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2598
0
                unsafe { self_.free_buckets(alloc, table_layout) };
2599
0
            }
2600
0
        }))
2601
0
    }
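`prepare_resize` hands the freshly allocated table back inside a scope guard so that a panicking hash function cannot leak it. Below is a minimal sketch of that guard pattern (not the crate's actual `scopeguard` implementation): a cleanup closure runs on drop unless the guard is defused after success, mirroring `mem::forget(guard)` / `ScopeGuard::into_inner` on the happy path.

// Minimal sketch of the scope-guard pattern used by `prepare_resize`,
// `rehash_in_place`, and `clone_from` (illustration only).
struct Guard<F: FnMut()> {
    cleanup: F,
    armed: bool,
}

impl<F: FnMut()> Guard<F> {
    fn new(cleanup: F) -> Self {
        Guard { cleanup, armed: true }
    }
    // Disarm the guard once the fallible work has finished successfully.
    fn defuse(mut self) {
        self.armed = false;
    }
}

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        if self.armed {
            (self.cleanup)();
        }
    }
}

fn main() {
    // If a panic unwound past this point, the cleanup would run and release
    // the hypothetical new allocation; on success we defuse it instead.
    let guard = Guard::new(|| println!("freeing the new allocation"));
    println!("copy succeeded");
    guard.defuse();
}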
2602
2603
    /// Reserves or rehashes to make room for `additional` more elements.
2604
    ///
2605
    /// This uses dynamic dispatch to reduce the amount of
2606
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2607
    ///
2608
    /// # Safety
2609
    ///
2610
    /// If any of the following conditions are violated, the result is
2611
    /// [`undefined behavior`]:
2612
    ///
2613
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2614
    ///   to allocate this table.
2615
    ///
2616
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2617
    ///   used to allocate this table.
2618
    ///
2619
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2620
    ///   the elements stored in the table.
2621
    ///
2622
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2623
    ///
2624
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2625
    #[allow(clippy::inline_always)]
2626
    #[inline(always)]
2627
0
    unsafe fn reserve_rehash_inner<A>(
2628
0
        &mut self,
2629
0
        alloc: &A,
2630
0
        additional: usize,
2631
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2632
0
        fallibility: Fallibility,
2633
0
        layout: TableLayout,
2634
0
        drop: Option<unsafe fn(*mut u8)>,
2635
0
    ) -> Result<(), TryReserveError>
2636
0
    where
2637
0
        A: Allocator,
2638
0
    {
2639
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2640
0
        let new_items = match self.items.checked_add(additional) {
2641
0
            Some(new_items) => new_items,
2642
0
            None => return Err(fallibility.capacity_overflow()),
2643
        };
2644
0
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2645
0
        if new_items <= full_capacity / 2 {
2646
            // Rehash in-place without re-allocating if we have plenty of spare
2647
            // capacity that is locked up due to DELETED entries.
2648
2649
            // SAFETY:
2650
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2651
            //    (since new_items <= full_capacity / 2);
2652
            // 2. The caller ensures that `drop` function is the actual drop function of
2653
            //    the elements stored in the table.
2654
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2655
            //    used to allocate this table.
2656
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2657
            //    are already initialized.
2658
0
            self.rehash_in_place(hasher, layout.size, drop);
2659
0
            Ok(())
2660
        } else {
2661
            // Otherwise, conservatively resize to at least the next size up
2662
            // to avoid churning deletes into frequent rehashes.
2663
            //
2664
            // SAFETY:
2665
            // 1. We know for sure that `capacity >= self.items`.
2666
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2667
            //    [`TableLayout`] that were used to allocate this table.
2668
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2669
            //    are already initialized.
2670
0
            self.resize_inner(
2671
0
                alloc,
2672
0
                usize::max(new_items, full_capacity + 1),
2673
0
                hasher,
2674
0
                fallibility,
2675
0
                layout,
2676
0
            )
2677
        }
2678
0
    }
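The branch above reduces to a simple threshold on the requested size. Here is a standalone sketch of the same decision, taking `full_capacity` as an input instead of deriving it via `bucket_mask_to_capacity`, and inventing a small `Plan` enum purely for illustration:

// Sketch of reserve_rehash_inner's grow-vs-rehash decision.
#[derive(Debug, PartialEq)]
enum Plan {
    RehashInPlace,
    ResizeTo(usize),
}

fn plan_reserve(items: usize, additional: usize, full_capacity: usize) -> Option<Plan> {
    // Checked add mirrors the capacity-overflow handling above.
    let new_items = items.checked_add(additional)?;
    Some(if new_items <= full_capacity / 2 {
        // Plenty of capacity is locked up in DELETED slots: reuse the allocation.
        Plan::RehashInPlace
    } else {
        // Otherwise grow to at least the next size up.
        Plan::ResizeTo(usize::max(new_items, full_capacity + 1))
    })
}

fn main() {
    assert_eq!(plan_reserve(3, 1, 14), Some(Plan::RehashInPlace));
    assert_eq!(plan_reserve(10, 6, 14), Some(Plan::ResizeTo(16)));
    assert_eq!(plan_reserve(usize::MAX, 1, 14), None); // capacity overflow
    println!("reserve planning checks passed");
}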
2679
2680
    /// Returns an iterator over full buckets indices in the table.
2681
    ///
2682
    /// # Safety
2683
    ///
2684
    /// Behavior is undefined if any of the following conditions are violated:
2685
    ///
2686
    /// * The caller has to ensure that the `RawTableInner` outlives the
2687
    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2688
    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2689
    ///   `full_buckets_indices` method unsafe.
2690
    ///
2691
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2692
    #[inline(always)]
2693
0
    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2694
0
        // SAFETY:
2695
0
        // 1. Since the caller of this function ensures that the control bytes
2696
0
        //    are properly initialized and `self.ctrl(0)` points to the start
2697
0
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2698
0
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2699
0
        //    control bytes.
2700
0
        // 2. The value of `items` is equal to the amount of data (values) added
2701
0
        //    to the table.
2702
0
        //
2703
0
        //                         `ctrl` points here (to the start
2704
0
        //                         of the first control byte `CT0`)
2705
0
        //                          ∨
2706
0
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2707
0
        //                           \________  ________/
2708
0
        //                                    \/
2709
0
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2710
0
        //
2711
0
        // where: T0...T_n  - our stored data;
2712
0
        //        CT0...CT_n - control bytes or metadata for `data`.
2713
0
        let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::<u8>());
2714
0
2715
0
        FullBucketsIndices {
2716
0
            // Load the first group
2717
0
            // SAFETY: See explanation above.
2718
0
            current_group: Group::load_aligned(ctrl.as_ptr().cast())
2719
0
                .match_full()
2720
0
                .into_iter(),
2721
0
            group_first_index: 0,
2722
0
            ctrl,
2723
0
            items: self.items,
2724
0
        }
2725
0
    }
2726
2727
    /// Allocates a new table of a different size and moves the contents of the
2728
    /// current table into it.
2729
    ///
2730
    /// This uses dynamic dispatch to reduce the amount of
2731
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2732
    ///
2733
    /// # Safety
2734
    ///
2735
    /// If any of the following conditions are violated, the result is
2736
    /// [`undefined behavior`]:
2737
    ///
2738
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2739
    ///   to allocate this table;
2740
    ///
2741
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2742
    ///   used to allocate this table;
2743
    ///
2744
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2745
    ///
2746
    /// The caller of this function must ensure that `capacity >= self.items`
2747
    /// otherwise:
2748
    ///
2749
    /// * If `self.items != 0`, calling this function with `capacity == 0`
2750
    ///   results in [`undefined behavior`].
2751
    ///
2752
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2753
    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2754
    ///   results in [`undefined behavior`].
2755
    ///
2756
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2757
    ///   `self.items > capacity_to_buckets(capacity)`, then calling this function
2758
    ///   never returns (it will go into an infinite loop).
2759
    ///
2760
    /// Note: It is recommended (but not required) that the new table's `capacity`
2761
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2762
    /// this function can never return. See [`RawTableInner::find_insert_slot`] for
2763
    /// more information.
2764
    ///
2765
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2766
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2767
    #[allow(clippy::inline_always)]
2768
    #[inline(always)]
2769
0
    unsafe fn resize_inner<A>(
2770
0
        &mut self,
2771
0
        alloc: &A,
2772
0
        capacity: usize,
2773
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2774
0
        fallibility: Fallibility,
2775
0
        layout: TableLayout,
2776
0
    ) -> Result<(), TryReserveError>
2777
0
    where
2778
0
        A: Allocator,
2779
0
    {
2780
        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2781
        // that were used to allocate this table.
2782
0
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2783
2784
        // SAFETY: We know for sure that RawTableInner will outlive the
2785
        // returned `FullBucketsIndices` iterator, and the caller of this
2786
        // function ensures that the control bytes are properly initialized.
2787
0
        for full_byte_index in self.full_buckets_indices() {
2788
0
            // This may panic.
2789
0
            let hash = hasher(self, full_byte_index);
2790
0
2791
0
            // SAFETY:
2792
0
            // We can use a simpler version of insert() here since:
2793
0
            // 1. There are no DELETED entries.
2794
0
            // 2. We know there is enough space in the table.
2795
0
            // 3. All elements are unique.
2796
0
            // 4. The caller of this function guarantees that `capacity > 0`
2797
0
            //    so `new_table` must already have some allocated memory.
2798
0
            // 5. We set `growth_left` and `items` fields of the new table
2799
0
            //    after the loop.
2800
0
            // 6. We insert into the table, at the returned index, the data
2801
0
            //    matching the given hash immediately after calling this function.
2802
0
            let (new_index, _) = new_table.prepare_insert_slot(hash);
2803
0
2804
0
            // SAFETY:
2805
0
            //
2806
0
            // * `src` is valid for reads of `layout.size` bytes, since the
2807
0
            //   table is alive and the `full_byte_index` is guaranteed to be
2808
0
            //   within bounds (see `FullBucketsIndices::next_impl`);
2809
0
            //
2810
0
            // * `dst` is valid for writes of `layout.size` bytes, since the
2811
0
            //   caller ensures that `table_layout` matches the [`TableLayout`]
2812
0
            //   that was used to allocate old table and we have the `new_index`
2813
0
            //   returned by `prepare_insert_slot`.
2814
0
            //
2815
0
            // * Both `src` and `dst` are properly aligned.
2816
0
            //
2817
0
            // * Both `src` and `dst` point to different region of memory.
2818
0
            ptr::copy_nonoverlapping(
2819
0
                self.bucket_ptr(full_byte_index, layout.size),
2820
0
                new_table.bucket_ptr(new_index, layout.size),
2821
0
                layout.size,
2822
0
            );
2823
0
        }
2824
2825
        // The hash function didn't panic, so we can safely set the
2826
        // `growth_left` and `items` fields of the new table.
2827
0
        new_table.growth_left -= self.items;
2828
0
        new_table.items = self.items;
2829
0
2830
0
        // We successfully copied all elements without panicking. Now replace
2831
0
        // self with the new table. The old table will have its memory freed but
2832
0
        // the items will not be dropped (since they have been moved into the
2833
0
        // new table).
2834
0
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2835
0
        // that was used to allocate this table.
2836
0
        mem::swap(self, &mut new_table);
2837
0
2838
0
        Ok(())
2839
0
    }
2840
2841
    /// Rehashes the contents of the table in place (i.e. without changing the
2842
    /// allocation).
2843
    ///
2844
    /// If `hasher` panics, then some of the table's contents may be lost.
2845
    ///
2846
    /// This uses dynamic dispatch to reduce the amount of
2847
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2848
    ///
2849
    /// # Safety
2850
    ///
2851
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2852
    ///
2853
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2854
    ///
2855
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2856
    ///   the elements stored in the table.
2857
    ///
2858
    /// * The [`RawTableInner`] has already been allocated;
2859
    ///
2860
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2861
    ///
2862
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2863
    #[allow(clippy::inline_always)]
2864
    #[cfg_attr(feature = "inline-more", inline(always))]
2865
    #[cfg_attr(not(feature = "inline-more"), inline)]
2866
0
    unsafe fn rehash_in_place(
2867
0
        &mut self,
2868
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2869
0
        size_of: usize,
2870
0
        drop: Option<unsafe fn(*mut u8)>,
2871
0
    ) {
2872
0
        // If the hash function panics then properly clean up any elements
2873
0
        // that we haven't rehashed yet. We unfortunately can't preserve the
2874
0
        // elements since we've lost their hashes and have no way of recovering them
2875
0
        // without risking another panic.
2876
0
        self.prepare_rehash_in_place();
2877
0
2878
0
        let mut guard = guard(self, move |self_| {
2879
0
            if let Some(drop) = drop {
2880
0
                for i in 0..self_.buckets() {
2881
0
                    if *self_.ctrl(i) == Tag::DELETED {
2882
0
                        self_.set_ctrl(i, Tag::EMPTY);
2883
0
                        drop(self_.bucket_ptr(i, size_of));
2884
0
                        self_.items -= 1;
2885
0
                    }
2886
                }
2887
0
            }
2888
0
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2889
0
        });
2890
2891
        // At this point, DELETED elements are elements that we haven't
2892
        // rehashed yet. Find them and re-insert them at their ideal
2893
        // position.
2894
0
        'outer: for i in 0..guard.buckets() {
2895
0
            if *guard.ctrl(i) != Tag::DELETED {
2896
0
                continue;
2897
0
            }
2898
0
2899
0
            let i_p = guard.bucket_ptr(i, size_of);
2900
2901
            'inner: loop {
2902
                // Hash the current item
2903
0
                let hash = hasher(*guard, i);
2904
0
2905
0
                // Search for a suitable place to put it
2906
0
                //
2907
0
                // SAFETY: Caller of this function ensures that the control bytes
2908
0
                // are properly initialized.
2909
0
                let new_i = guard.find_insert_slot(hash).index;
2910
0
2911
0
                // Probing works by scanning through all of the control
2912
0
                // bytes in groups, which may not be aligned to the group
2913
0
                // size. If both the new and old position fall within the
2914
0
                // same unaligned group, then there is no benefit in moving
2915
0
                // it and we can just continue to the next item.
2916
0
                if likely(guard.is_in_same_group(i, new_i, hash)) {
2917
0
                    guard.set_ctrl_hash(i, hash);
2918
0
                    continue 'outer;
2919
0
                }
2920
0
2921
0
                let new_i_p = guard.bucket_ptr(new_i, size_of);
2922
0
2923
0
                // We are moving the current item to a new position. Write
2924
0
                // our H2 to the control byte of the new position.
2925
0
                let prev_ctrl = guard.replace_ctrl_hash(new_i, hash);
2926
0
                if prev_ctrl == Tag::EMPTY {
2927
0
                    guard.set_ctrl(i, Tag::EMPTY);
2928
0
                    // If the target slot is empty, simply move the current
2929
0
                    // element into the new slot and clear the old control
2930
0
                    // byte.
2931
0
                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2932
0
                    continue 'outer;
2933
                } else {
2934
                    // If the target slot is occupied, swap the two elements
2935
                    // and then continue processing the element that we just
2936
                    // swapped into the old slot.
2937
0
                    debug_assert_eq!(prev_ctrl, Tag::DELETED);
2938
0
                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2939
0
                    continue 'inner;
2940
                }
2941
            }
2942
        }
2943
2944
0
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2945
0
2946
0
        mem::forget(guard);
2947
0
    }
2948
2949
    /// Deallocates the table without dropping any entries.
2950
    ///
2951
    /// # Note
2952
    ///
2953
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
2954
    /// otherwise it can leak memory. Calling this function also automatically
2955
    /// invalidates (leaves dangling) all bucket instances ([`Bucket`]) and invalidates
2956
    /// (leaves dangling) the `ctrl` field of the table.
2957
    ///
2958
    /// # Safety
2959
    ///
2960
    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2961
    ///
2962
    /// * The [`RawTableInner`] has already been allocated;
2963
    ///
2964
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2965
    ///   to allocate this table.
2966
    ///
2967
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2968
    ///   to allocate this table.
2969
    ///
2970
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2971
    ///
2972
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2973
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2974
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2975
    #[inline]
2976
193k
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2977
193k
    where
2978
193k
        A: Allocator,
2979
193k
    {
2980
193k
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2981
193k
        // method.
2982
193k
        let (ptr, layout) = self.allocation_info(table_layout);
2983
193k
        alloc.deallocate(ptr, layout);
2984
193k
    }
2985
2986
    /// Returns a pointer to the allocated memory and the layout that was used to
2987
    /// allocate the table.
2988
    ///
2989
    /// # Safety
2990
    ///
2991
    /// Caller of this function must observe the following safety rules:
2992
    ///
2993
    /// * The [`RawTableInner`] has already been allocated, otherwise
2994
    ///   calling this function results in [`undefined behavior`]
2995
    ///
2996
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2997
    ///   that was used to allocate this table. Failure to comply with this condition
2998
    ///   may result in [`undefined behavior`].
2999
    ///
3000
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3001
    ///
3002
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3003
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3004
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3005
    #[inline]
3006
193k
    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3007
193k
        debug_assert!(
3008
0
            !self.is_empty_singleton(),
3009
0
            "this function can only be called on non-empty tables"
3010
        );
3011
3012
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3013
193k
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3014
193k
            Some(lco) => lco,
3015
0
            None => unsafe { hint::unreachable_unchecked() },
3016
        };
3017
193k
        (
3018
193k
            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3019
193k
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3020
193k
            layout,
3021
193k
        )
3022
193k
    }
3023
3024
    /// Returns the total amount of memory allocated internally by the hash
3025
    /// table, in bytes.
3026
    ///
3027
    /// The returned number is informational only. It is intended to be
3028
    /// primarily used for memory profiling.
3029
    ///
3030
    /// # Safety
3031
    ///
3032
    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3033
    /// that was used to allocate this table. Failure to comply with this condition
3034
    /// may result in [`undefined behavior`].
3035
    ///
3036
    ///
3037
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3038
    #[inline]
3039
    unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize {
3040
        if self.is_empty_singleton() {
3041
            0
3042
        } else {
3043
            // SAFETY:
3044
            // 1. We have checked that our table is allocated.
3045
            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3046
            // that was used to allocate this table.
3047
            unsafe { self.allocation_info(table_layout).1.size() }
3048
        }
3049
    }
3050
3051
    /// Marks all table buckets as empty without dropping their contents.
3052
    #[inline]
3053
193k
    fn clear_no_drop(&mut self) {
3054
193k
        if !self.is_empty_singleton() {
3055
193k
            self.ctrl_slice().fill_empty();
3056
193k
        }
3057
193k
        self.items = 0;
3058
193k
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3059
193k
    }
3060
3061
    /// Erases the [`Bucket`]'s control byte at the given index so that it is no longer
3062
    /// reported as full, decreases the `items` count of the table and, if possible,
3063
    /// increases `self.growth_left`.
3064
    ///
3065
    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3066
    /// does not make any changes to the `data` parts of the table. The caller of this
3067
    /// function must take care to properly drop the `data`, otherwise calling this
3068
    /// function may result in a memory leak.
3069
    ///
3070
    /// # Safety
3071
    ///
3072
    /// You must observe the following safety rules when calling this function:
3073
    ///
3074
    /// * The [`RawTableInner`] has already been allocated;
3075
    ///
3076
    /// * The control byte at the given position must be full;
3077
    ///
3078
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3079
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3080
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3081
    ///
3082
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3083
    ///
3084
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3085
    /// functions is likely to result in [`undefined behavior`] due to a subtraction overflow
3086
    /// (`self.items -= 1` overflows when `self.items == 0`).
3087
    ///
3088
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
3089
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
3090
    ///
3091
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3092
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3093
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3094
    #[inline]
3095
168k
    unsafe fn erase(&mut self, index: usize) {
3096
168k
        debug_assert!(self.is_bucket_full(index));
3097
3098
        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3099
        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3100
168k
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3101
168k
        // SAFETY:
3102
168k
        // - The caller must uphold the safety contract for `erase` method;
3103
168k
        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3104
168k
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3105
168k
        let empty_after = Group::load(self.ctrl(index)).match_empty();
3106
3107
        // Inserting and searching in the map is performed by two key functions:
3108
        //
3109
        // - The `find_insert_slot` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED`
3110
        //   slot in a group to be able to insert. If it doesn't find a `Tag::EMPTY` or `Tag::DELETED`
3111
        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3112
        //   and so on until it has gone through all the groups in the control bytes.
3113
        //
3114
        // - The `find_inner` function that looks for the index of the desired element by looking
3115
        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3116
        //   there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_slot`
3117
        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3118
        //   jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY`
3119
        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
3120
        //   finds the desired element or hits a `Tag::EMPTY` slot/byte.
3121
        //
3122
        // Accordingly, this leads to two consequences:
3123
        //
3124
        // - The map must have `Tag::EMPTY` slots (bytes);
3125
        //
3126
        // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner`
3127
        //   function may stumble upon a `Tag::EMPTY` byte before finding the desired element and stop
3128
        //   searching.
3129
        //
3130
        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3131
        // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes
3132
        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3133
        // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3134
        // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3135
        // upon a `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well.
3136
        //
3137
        // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
3138
        // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
3139
        // cannot have `Tag::DELETED` bytes.
3140
        //
3141
        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3142
        // `trailing_zeros` refers to the bytes at the beginning of a group.
3143
168k
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3144
27
            Tag::DELETED
3145
        } else {
3146
168k
            self.growth_left += 1;
3147
168k
            Tag::EMPTY
3148
        };
3149
        // SAFETY: the caller must uphold the safety contract for `erase` method.
3150
168k
        self.set_ctrl(index, ctrl);
3151
168k
        self.items -= 1;
3152
168k
    }
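The long comment above can be restated as a scalar run-length test. The sketch below (assumed group width of 4, plain `u8` control bytes, wrap-around by masking, no replicated tail or SIMD groups) makes the EMPTY-versus-DELETED decision for an erased slot in the same spirit:

// Scalar sketch of erase's EMPTY-vs-DELETED decision (assumed GROUP_WIDTH = 4).
// `ctrl[i] == EMPTY` marks an empty slot; anything else counts as occupied.
const GROUP_WIDTH: usize = 4;
const EMPTY: u8 = 0xFF;

fn erase_marker(ctrl: &[u8], index: usize) -> u8 {
    let mask = ctrl.len() - 1; // number of buckets is a power of two
    // Occupied slots immediately before `index` (at most one group's worth).
    let before = (1..=GROUP_WIDTH)
        .take_while(|i| ctrl[index.wrapping_sub(*i) & mask] != EMPTY)
        .count();
    // Occupied slots starting at `index` itself (at most one group's worth).
    let after = (0..GROUP_WIDTH)
        .take_while(|i| ctrl[(index + *i) & mask] != EMPTY)
        .count();
    // A run of at least GROUP_WIDTH occupied slots spans `index`: probing may
    // continue past it, so it must become DELETED rather than EMPTY.
    if before + after >= GROUP_WIDTH { 0x80 /* DELETED */ } else { EMPTY }
}

fn main() {
    // 8 buckets; 0x01 stands in for a full slot. A short run around index 2
    // can safely become EMPTY again...
    let sparse: [u8; 8] = [0x01, 0x01, 0x01, EMPTY, EMPTY, EMPTY, EMPTY, EMPTY];
    assert_eq!(erase_marker(&sparse, 2), EMPTY);
    // ...but a long run forces DELETED so searches keep probing past the slot.
    let dense: [u8; 8] = [0x01; 8];
    assert_eq!(erase_marker(&dense, 2), 0x80);
    println!("erase marker checks passed");
}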
3153
}
3154
3155
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3156
    fn clone(&self) -> Self {
3157
        if self.table.is_empty_singleton() {
3158
            Self::new_in(self.alloc.clone())
3159
        } else {
3160
            unsafe {
3161
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3162
                //
3163
                // SAFETY: This is safe as we are taking the size of an already allocated table
3164
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
3165
                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3166
                let mut new_table = match Self::new_uninitialized(
3167
                    self.alloc.clone(),
3168
                    self.table.buckets(),
3169
                    Fallibility::Infallible,
3170
                ) {
3171
                    Ok(table) => table,
3172
                    Err(_) => hint::unreachable_unchecked(),
3173
                };
3174
3175
                // Cloning elements may fail (the clone function may panic). But we don't
3176
                // need to worry about uninitialized control bits, since:
3177
                // 1. The number of items (elements) in the table is zero, which means that
3178
                //    the control bits will not be read by the Drop function.
3179
                // 2. The `clone_from_spec` method will first copy all control bits from
3180
                //    `self` (thus initializing them). But this will not affect the `Drop`
3181
                //    function, since the `clone_from_spec` function sets `items` only after
3182
                //    successfully cloning all elements.
3183
                new_table.clone_from_spec(self);
3184
                new_table
3185
            }
3186
        }
3187
    }
3188
3189
    fn clone_from(&mut self, source: &Self) {
3190
        if source.table.is_empty_singleton() {
3191
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3192
            unsafe {
3193
                // SAFETY:
3194
                // 1. We call the function only once;
3195
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3196
                //    and [`TableLayout`] that were used to allocate this table.
3197
                // 3. If any elements' drop function panics, then there will only be a memory leak,
3198
                //    because we have replaced the inner table with a new one.
3199
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3200
            }
3201
        } else {
3202
            unsafe {
3203
                // Make sure that if any panic occurs, we clear the table and
3204
                // leave it in an empty state.
3205
                let mut self_ = guard(self, |self_| {
3206
                    self_.clear_no_drop();
3207
                });
3208
3209
                // First, drop all our elements without clearing the control
3210
                // bytes. If this panics then the scope guard will clear the
3211
                // table, leaking any elements that were not dropped yet.
3212
                //
3213
                // This leak is unavoidable: we can't try dropping more elements
3214
                // since this could lead to another panic and abort the process.
3215
                //
3216
                // SAFETY: If something goes wrong we clear our table right after
3217
                // dropping the elements, so there is no double drop, since `items`
3218
                // will be equal to zero.
3219
                self_.table.drop_elements::<T>();
3220
3221
                // If necessary, resize our table to match the source.
3222
                if self_.buckets() != source.buckets() {
3223
                    let new_inner = match RawTableInner::new_uninitialized(
3224
                        &self_.alloc,
3225
                        Self::TABLE_LAYOUT,
3226
                        source.buckets(),
3227
                        Fallibility::Infallible,
3228
                    ) {
3229
                        Ok(table) => table,
3230
                        Err(_) => hint::unreachable_unchecked(),
3231
                    };
3232
                    // Replace the old inner with a new uninitialized one. It's ok, since if something goes
3233
                    // wrong the `ScopeGuard` will initialize all control bytes and leave an empty table.
3234
                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3235
                    if !old_inner.is_empty_singleton() {
3236
                        // SAFETY:
3237
                        // 1. We have checked that our table is allocated.
3238
                        // 2. We know for sure that `alloc` and `table_layout` matches
3239
                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3240
                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3241
                    }
3242
                }
3243
3244
                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3245
                // inside the `clone_from_impl` function will take care of that, dropping all
3246
                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3247
                self_.clone_from_spec(source);
3248
3249
                // Disarm the scope guard if cloning was successful.
3250
                ScopeGuard::into_inner(self_);
3251
            }
3252
        }
3253
    }
3254
}
3255
3256
/// Specialization of `clone_from` for `Copy` types
3257
trait RawTableClone {
3258
    unsafe fn clone_from_spec(&mut self, source: &Self);
3259
}
3260
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3261
    default_fn! {
3262
        #[cfg_attr(feature = "inline-more", inline)]
3263
        unsafe fn clone_from_spec(&mut self, source: &Self) {
3264
            self.clone_from_impl(source);
3265
        }
3266
    }
3267
}
3268
#[cfg(feature = "nightly")]
3269
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3270
    #[cfg_attr(feature = "inline-more", inline)]
3271
    unsafe fn clone_from_spec(&mut self, source: &Self) {
3272
        source
3273
            .table
3274
            .ctrl(0)
3275
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3276
        source
3277
            .data_start()
3278
            .as_ptr()
3279
            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3280
3281
        self.table.items = source.table.items;
3282
        self.table.growth_left = source.table.growth_left;
3283
    }
3284
}
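
A small self-contained sketch of the idea behind the `T: Copy` specialization above: when the payload is plain old data, `clone_from` degenerates into two bulk copies (control bytes and bucket array) plus the bookkeeping counters. `MiniTable`, its fields, and `copy_from` are invented names for illustration, not hashbrown's internals.

    struct MiniTable<T: Copy> {
        ctrl: Vec<u8>,      // control bytes
        data: Vec<T>,       // bucket payloads
        items: usize,
        growth_left: usize,
    }

    fn copy_from<T: Copy>(dst: &mut MiniTable<T>, src: &MiniTable<T>) {
        assert_eq!(dst.ctrl.len(), src.ctrl.len());
        assert_eq!(dst.data.len(), src.data.len());
        dst.ctrl.copy_from_slice(&src.ctrl); // control bytes: plain memcpy
        dst.data.copy_from_slice(&src.data); // payloads: sound because T: Copy
        dst.items = src.items;
        dst.growth_left = src.growth_left;
    }
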
3285
3286
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3287
    /// Common code for `clone` and `clone_from`. Assumes:
3288
    /// - `self.buckets() == source.buckets()`.
3289
    /// - Any existing elements have been dropped.
3290
    /// - The control bytes are not initialized yet.
3291
    #[cfg_attr(feature = "inline-more", inline)]
3292
    unsafe fn clone_from_impl(&mut self, source: &Self) {
3293
        // Copy the control bytes unchanged. We do this in a single pass
3294
        source
3295
            .table
3296
            .ctrl(0)
3297
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3298
3299
        // The cloning of elements may panic, in which case we need
3300
        // to make sure we drop only the elements that have been
3301
        // cloned so far.
3302
        let mut guard = guard((0, &mut *self), |(index, self_)| {
3303
            if T::NEEDS_DROP {
3304
                for i in 0..*index {
3305
                    if self_.is_bucket_full(i) {
3306
                        self_.bucket(i).drop();
3307
                    }
3308
                }
3309
            }
3310
        });
3311
3312
        for from in source.iter() {
3313
            let index = source.bucket_index(&from);
3314
            let to = guard.1.bucket(index);
3315
            to.write(from.as_ref().clone());
3316
3317
            // Update the index in case we need to unwind.
3318
            guard.0 = index + 1;
3319
        }
3320
3321
        // Successfully cloned all items, no need to clean up.
3322
        mem::forget(guard);
3323
3324
        self.table.items = source.table.items;
3325
        self.table.growth_left = source.table.growth_left;
3326
    }
3327
}
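
The guard in `clone_from_impl` records how far cloning has progressed, so a panic inside `T::clone` unwinds by dropping only the already-cloned prefix, never the uninitialized tail. A minimal sketch of the same pattern on a plain slice; `clone_into_uninit` and `Guard` are invented names, and the caller is assumed to treat `dst` as fully initialized only if the function returns normally.

    use std::mem::MaybeUninit;

    fn clone_into_uninit<T: Clone>(src: &[T], dst: &mut [MaybeUninit<T>]) {
        assert_eq!(src.len(), dst.len());

        struct Guard<'a, T> {
            dst: &'a mut [MaybeUninit<T>],
            initialized: usize,
        }
        impl<T> Drop for Guard<'_, T> {
            fn drop(&mut self) {
                // Only reached on unwinding: drop the prefix cloned so far.
                for slot in &mut self.dst[..self.initialized] {
                    unsafe { slot.assume_init_drop() };
                }
            }
        }

        let mut guard = Guard { dst, initialized: 0 };
        for (i, item) in src.iter().enumerate() {
            guard.dst[i].write(item.clone());
            guard.initialized = i + 1; // record progress for the unwind path
        }
        // Success: defuse the guard so the cloned elements are kept.
        std::mem::forget(guard);
    }
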
3328
3329
impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3330
    #[inline]
3331
    fn default() -> Self {
3332
        Self::new_in(Default::default())
3333
    }
3334
}
3335
3336
#[cfg(feature = "nightly")]
3337
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3338
    #[cfg_attr(feature = "inline-more", inline)]
3339
    fn drop(&mut self) {
3340
        unsafe {
3341
            // SAFETY:
3342
            // 1. We call the function only once;
3343
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3344
            //    and [`TableLayout`] that were used to allocate this table.
3345
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3346
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3347
            //    so there won't be any table left in an inconsistent state.
3348
            self.table
3349
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3350
        }
3351
    }
3352
}
3353
#[cfg(not(feature = "nightly"))]
3354
impl<T, A: Allocator> Drop for RawTable<T, A> {
3355
    #[cfg_attr(feature = "inline-more", inline)]
3356
193k
    fn drop(&mut self) {
3357
193k
        unsafe {
3358
193k
            // SAFETY:
3359
193k
            // 1. We call the function only once;
3360
193k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
193k
            //    and [`TableLayout`] that were used to allocate this table.
3362
193k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
193k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
193k
            //    so there won't be any table left in an inconsistent state.
3365
193k
            self.table
3366
193k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
193k
        }
3368
193k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3356
38.7k
    fn drop(&mut self) {
3357
38.7k
        unsafe {
3358
38.7k
            // SAFETY:
3359
38.7k
            // 1. We call the function only once;
3360
38.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
38.7k
            //    and [`TableLayout`] that were used to allocate this table.
3362
38.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
38.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
38.7k
            //    so there won't be any table left in an inconsistent state.
3365
38.7k
            self.table
3366
38.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
38.7k
        }
3368
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3356
38.7k
    fn drop(&mut self) {
3357
38.7k
        unsafe {
3358
38.7k
            // SAFETY:
3359
38.7k
            // 1. We call the function only once;
3360
38.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
38.7k
            //    and [`TableLayout`] that were used to allocate this table.
3362
38.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
38.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
38.7k
            //    so there won't be any table left in an inconsistent state.
3365
38.7k
            self.table
3366
38.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
38.7k
        }
3368
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3356
38.7k
    fn drop(&mut self) {
3357
38.7k
        unsafe {
3358
38.7k
            // SAFETY:
3359
38.7k
            // 1. We call the function only once;
3360
38.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
38.7k
            //    and [`TableLayout`] that were used to allocate this table.
3362
38.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
38.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
38.7k
            //    so there won't be any table left in an inconsistent state.
3365
38.7k
            self.table
3366
38.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
38.7k
        }
3368
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3356
38.7k
    fn drop(&mut self) {
3357
38.7k
        unsafe {
3358
38.7k
            // SAFETY:
3359
38.7k
            // 1. We call the function only once;
3360
38.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
38.7k
            //    and [`TableLayout`] that were used to allocate this table.
3362
38.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
38.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
38.7k
            //    so there won't be any table left in an inconsistent state.
3365
38.7k
            self.table
3366
38.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
38.7k
        }
3368
38.7k
    }
<hashbrown::raw::RawTable<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3356
38.7k
    fn drop(&mut self) {
3357
38.7k
        unsafe {
3358
38.7k
            // SAFETY:
3359
38.7k
            // 1. We call the function only once;
3360
38.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
38.7k
            //    and [`TableLayout`] that were used to allocate this table.
3362
38.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
38.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
38.7k
            //    so there won't be any table left in an inconsistent state.
3365
38.7k
            self.table
3366
38.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
38.7k
        }
3368
38.7k
    }
3369
}
3370
3371
impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3372
    type Item = T;
3373
    type IntoIter = RawIntoIter<T, A>;
3374
3375
    #[cfg_attr(feature = "inline-more", inline)]
3376
    fn into_iter(self) -> RawIntoIter<T, A> {
3377
        unsafe {
3378
            let iter = self.iter();
3379
            self.into_iter_from(iter)
3380
        }
3381
    }
3382
}
3383
3384
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3385
/// not track an item count.
3386
pub(crate) struct RawIterRange<T> {
3387
    // Mask of full buckets in the current group. Bits are cleared from this
3388
    // mask as each element is processed.
3389
    current_group: BitMaskIter,
3390
3391
    // Pointer to the buckets for the current group.
3392
    data: Bucket<T>,
3393
3394
    // Pointer to the next group of control bytes,
3395
    // Must be aligned to the group size.
3396
    next_ctrl: *const u8,
3397
3398
    // Pointer one past the last control byte of this range.
3399
    end: *const u8,
3400
}
3401
3402
impl<T> RawIterRange<T> {
3403
    /// Returns a `RawIterRange` covering a subset of a table.
3404
    ///
3405
    /// # Safety
3406
    ///
3407
    /// If any of the following conditions are violated, the result is
3408
    /// [`undefined behavior`]:
3409
    ///
3410
    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3411
    ///
3412
    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
3413
    ///
3414
    /// * `ctrl` must point to the array of properly initialized control bytes;
3415
    ///
3416
    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3417
    ///
3418
    /// * the value of `len` must be less than or equal to the number of table buckets,
3419
    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3420
    ///   must be positive.
3421
    ///
3422
    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3423
    ///   byte past the end of the same [allocated table].
3424
    ///
3425
    /// * The `len` must be a power of two.
3426
    ///
3427
    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3428
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3429
    #[cfg_attr(feature = "inline-more", inline)]
3430
193k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
193k
        debug_assert_ne!(len, 0);
3432
193k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
193k
        let end = ctrl.add(len);
3435
193k
3436
193k
        // Load the first group and advance ctrl to point to the next group
3437
193k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
193k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
193k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
193k
3441
193k
        Self {
3442
193k
            current_group: current_group.into_iter(),
3443
193k
            data,
3444
193k
            next_ctrl,
3445
193k
            end,
3446
193k
        }
3447
193k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::new
Line
Count
Source
3430
38.7k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
38.7k
        debug_assert_ne!(len, 0);
3432
38.7k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
38.7k
        let end = ctrl.add(len);
3435
38.7k
3436
38.7k
        // Load the first group and advance ctrl to point to the next group
3437
38.7k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
38.7k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
38.7k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
38.7k
3441
38.7k
        Self {
3442
38.7k
            current_group: current_group.into_iter(),
3443
38.7k
            data,
3444
38.7k
            next_ctrl,
3445
38.7k
            end,
3446
38.7k
        }
3447
38.7k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::new
Line
Count
Source
3430
38.7k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
38.7k
        debug_assert_ne!(len, 0);
3432
38.7k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
38.7k
        let end = ctrl.add(len);
3435
38.7k
3436
38.7k
        // Load the first group and advance ctrl to point to the next group
3437
38.7k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
38.7k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
38.7k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
38.7k
3441
38.7k
        Self {
3442
38.7k
            current_group: current_group.into_iter(),
3443
38.7k
            data,
3444
38.7k
            next_ctrl,
3445
38.7k
            end,
3446
38.7k
        }
3447
38.7k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::new
Line
Count
Source
3430
38.7k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
38.7k
        debug_assert_ne!(len, 0);
3432
38.7k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
38.7k
        let end = ctrl.add(len);
3435
38.7k
3436
38.7k
        // Load the first group and advance ctrl to point to the next group
3437
38.7k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
38.7k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
38.7k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
38.7k
3441
38.7k
        Self {
3442
38.7k
            current_group: current_group.into_iter(),
3443
38.7k
            data,
3444
38.7k
            next_ctrl,
3445
38.7k
            end,
3446
38.7k
        }
3447
38.7k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::new
Line
Count
Source
3430
38.7k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
38.7k
        debug_assert_ne!(len, 0);
3432
38.7k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
38.7k
        let end = ctrl.add(len);
3435
38.7k
3436
38.7k
        // Load the first group and advance ctrl to point to the next group
3437
38.7k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
38.7k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
38.7k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
38.7k
3441
38.7k
        Self {
3442
38.7k
            current_group: current_group.into_iter(),
3443
38.7k
            data,
3444
38.7k
            next_ctrl,
3445
38.7k
            end,
3446
38.7k
        }
3447
38.7k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::new
Line
Count
Source
3430
38.7k
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
38.7k
        debug_assert_ne!(len, 0);
3432
38.7k
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
38.7k
        let end = ctrl.add(len);
3435
38.7k
3436
38.7k
        // Load the first group and advance ctrl to point to the next group
3437
38.7k
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
38.7k
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
38.7k
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
38.7k
3441
38.7k
        Self {
3442
38.7k
            current_group: current_group.into_iter(),
3443
38.7k
            data,
3444
38.7k
            next_ctrl,
3445
38.7k
            end,
3446
38.7k
        }
3447
38.7k
    }
3448
3449
    /// Splits a `RawIterRange` into two halves.
3450
    ///
3451
    /// Returns `None` for the second half if the remaining range is smaller than or equal to the
3452
    /// group width.
3453
    #[cfg_attr(feature = "inline-more", inline)]
3454
    #[cfg(feature = "rayon")]
3455
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3456
        unsafe {
3457
            if self.end <= self.next_ctrl {
3458
                // Nothing to split if the group that we are current processing
3459
                // is the last one.
3460
                (self, None)
3461
            } else {
3462
                // len is the remaining number of elements after the group that
3463
                // we are currently processing. It must be a multiple of the
3464
                // group size (small tables are caught by the check above).
3465
                let len = offset_from(self.end, self.next_ctrl);
3466
                debug_assert_eq!(len % Group::WIDTH, 0);
3467
3468
                // Split the remaining elements into two halves, but round the
3469
                // midpoint down in case there is an odd number of groups
3470
                // remaining. This ensures that:
3471
                // - The tail is at least 1 group long.
3472
                // - The split is roughly even considering we still have the
3473
                //   current group to process.
3474
                let mid = (len / 2) & !(Group::WIDTH - 1);
3475
3476
                let tail = Self::new(
3477
                    self.next_ctrl.add(mid),
3478
                    self.data.next_n(Group::WIDTH).next_n(mid),
3479
                    len - mid,
3480
                );
3481
                debug_assert_eq!(
3482
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3483
                    tail.data.ptr
3484
                );
3485
                debug_assert_eq!(self.end, tail.end);
3486
                self.end = self.next_ctrl.add(mid);
3487
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3488
                (self, Some(tail))
3489
            }
3490
        }
3491
    }
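
A worked example of the midpoint rounding in `split`, assuming a group width of 16 (the real `Group::WIDTH` depends on the target): rounding `len / 2` down to a multiple of the width keeps both halves group-aligned and guarantees the tail is at least one group long.

    const WIDTH: usize = 16; // stand-in for Group::WIDTH

    fn split_mid(len: usize) -> usize {
        (len / 2) & !(WIDTH - 1)
    }

    fn main() {
        assert_eq!(split_mid(32), 16); // two groups remaining: exact half
        assert_eq!(split_mid(48), 16); // three groups remaining: round down
        assert_eq!(split_mid(16), 0);  // one group remaining: the tail takes it all
    }
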
3492
3493
    /// # Safety
3494
    /// If `DO_CHECK_PTR_RANGE` is false, caller must ensure that we never try to iterate
3495
    /// after yielding all elements.
3496
    #[cfg_attr(feature = "inline-more", inline)]
3497
168k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
1.30M
            if let Some(index) = self.current_group.next() {
3500
168k
                return Some(self.data.next_n(index));
3501
1.13M
            }
3502
1.13M
3503
1.13M
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
1.13M
            }
3506
1.13M
3507
1.13M
            // We might read past self.end up to the next group boundary,
3508
1.13M
            // but this is fine because it only occurs on tables smaller
3509
1.13M
            // than the group size where the trailing control bytes are all
3510
1.13M
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
1.13M
            // to the group size (since tables are power-of-two sized).
3512
1.13M
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
1.13M
                .match_full()
3514
1.13M
                .into_iter();
3515
1.13M
            self.data = self.data.next_n(Group::WIDTH);
3516
1.13M
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
168k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::next_impl::<false>
Line
Count
Source
3497
40.6k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
578k
            if let Some(index) = self.current_group.next() {
3500
40.6k
                return Some(self.data.next_n(index));
3501
537k
            }
3502
537k
3503
537k
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
537k
            }
3506
537k
3507
537k
            // We might read past self.end up to the next group boundary,
3508
537k
            // but this is fine because it only occurs on tables smaller
3509
537k
            // than the group size where the trailing control bytes are all
3510
537k
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
537k
            // to the group size (since tables are power-of-two sized).
3512
537k
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
537k
                .match_full()
3514
537k
                .into_iter();
3515
537k
            self.data = self.data.next_n(Group::WIDTH);
3516
537k
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
40.6k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::next_impl::<false>
Line
Count
Source
3497
92.5k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
442k
            if let Some(index) = self.current_group.next() {
3500
92.5k
                return Some(self.data.next_n(index));
3501
349k
            }
3502
349k
3503
349k
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
349k
            }
3506
349k
3507
349k
            // We might read past self.end up to the next group boundary,
3508
349k
            // but this is fine because it only occurs on tables smaller
3509
349k
            // than the group size where the trailing control bytes are all
3510
349k
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
349k
            // to the group size (since tables are power-of-two sized).
3512
349k
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
349k
                .match_full()
3514
349k
                .into_iter();
3515
349k
            self.data = self.data.next_n(Group::WIDTH);
3516
349k
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
92.5k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::next_impl::<false>
Line
Count
Source
3497
14.2k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
42.6k
            if let Some(index) = self.current_group.next() {
3500
14.2k
                return Some(self.data.next_n(index));
3501
28.4k
            }
3502
28.4k
3503
28.4k
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
28.4k
            }
3506
28.4k
3507
28.4k
            // We might read past self.end up to the next group boundary,
3508
28.4k
            // but this is fine because it only occurs on tables smaller
3509
28.4k
            // than the group size where the trailing control bytes are all
3510
28.4k
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
28.4k
            // to the group size (since tables are power-of-two sized).
3512
28.4k
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
28.4k
                .match_full()
3514
28.4k
                .into_iter();
3515
28.4k
            self.data = self.data.next_n(Group::WIDTH);
3516
28.4k
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
14.2k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::next_impl::<false>
Line
Count
Source
3497
12.4k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
219k
            if let Some(index) = self.current_group.next() {
3500
12.4k
                return Some(self.data.next_n(index));
3501
207k
            }
3502
207k
3503
207k
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
207k
            }
3506
207k
3507
207k
            // We might read past self.end up to the next group boundary,
3508
207k
            // but this is fine because it only occurs on tables smaller
3509
207k
            // than the group size where the trailing control bytes are all
3510
207k
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
207k
            // to the group size (since tables are power-of-two sized).
3512
207k
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
207k
                .match_full()
3514
207k
                .into_iter();
3515
207k
            self.data = self.data.next_n(Group::WIDTH);
3516
207k
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
12.4k
    }
<hashbrown::raw::RawIterRange<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::next_impl::<false>
Line
Count
Source
3497
8.65k
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
19.9k
            if let Some(index) = self.current_group.next() {
3500
8.65k
                return Some(self.data.next_n(index));
3501
11.2k
            }
3502
11.2k
3503
11.2k
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
11.2k
            }
3506
11.2k
3507
11.2k
            // We might read past self.end up to the next group boundary,
3508
11.2k
            // but this is fine because it only occurs on tables smaller
3509
11.2k
            // than the group size where the trailing control bytes are all
3510
11.2k
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
11.2k
            // to the group size (since tables are power-of-two sized).
3512
11.2k
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
11.2k
                .match_full()
3514
11.2k
                .into_iter();
3515
11.2k
            self.data = self.data.next_n(Group::WIDTH);
3516
11.2k
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
8.65k
    }
3519
3520
    /// Folds every element into an accumulator by applying an operation,
3521
    /// returning the final result.
3522
    ///
3523
    /// `fold_impl()` takes three arguments: the number of items remaining in
3524
    /// the iterator, an initial value, and a closure with two arguments: an
3525
    /// 'accumulator', and an element. The closure returns the value that the
3526
    /// accumulator should have for the next iteration.
3527
    ///
3528
    /// The initial value is the value the accumulator will have on the first call.
3529
    ///
3530
    /// After applying this closure to every element of the iterator, `fold_impl()`
3531
    /// returns the accumulator.
3532
    ///
3533
    /// # Safety
3534
    ///
3535
    /// If any of the following conditions are violated, the result is
3536
    /// [`Undefined Behavior`]:
3537
    ///
3538
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3539
    ///   i.e. table outlives the `RawIterRange`;
3540
    ///
3541
    /// * The provided `n` value must match the actual number of items
3542
    ///   in the table.
3543
    ///
3544
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3545
    #[allow(clippy::while_let_on_iterator)]
3546
    #[cfg_attr(feature = "inline-more", inline)]
3547
    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3548
    where
3549
        F: FnMut(B, Bucket<T>) -> B,
3550
    {
3551
        loop {
3552
            while let Some(index) = self.current_group.next() {
3553
                // The returned `index` will always be in the range `0..Group::WIDTH`,
3554
                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3555
                debug_assert!(n != 0);
3556
                let bucket = self.data.next_n(index);
3557
                acc = f(acc, bucket);
3558
                n -= 1;
3559
            }
3560
3561
            if n == 0 {
3562
                return acc;
3563
            }
3564
3565
            // SAFETY: The caller of this function ensures that:
3566
            //
3567
            // 1. The provided `n` value matches the actual number of items in the table;
3568
            // 2. The table is alive and has not moved.
3569
            //
3570
            // Taking the above into account, we always stay within the bounds, because:
3571
            //
3572
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3573
            //    we will never end up in the given branch, since we should have already
3574
            //    yielded all the elements of the table.
3575
            //
3576
            // 2. For tables larger than the group width. The number of buckets is a
3577
            //    power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since
3578
            //    `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3579
            //    start of the array of control bytes, and never try to iterate after
3580
            //    getting all the elements, the last `self.current_group` will read bytes
3581
            //    from the `self.buckets() - Group::WIDTH` index.  We know also that
3582
            //    `self.current_group.next()` will always return indices within the range
3583
            //    `0..Group::WIDTH`.
3584
            //
3585
            //    Knowing all of the above and taking into account that we are synchronizing
3586
            //    the `self.data` index with the index we used to read the `self.current_group`,
3587
            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3588
            //    an index number less than `self.buckets()`.
3589
            //
3590
            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3591
            //    actually be read, since we should have already yielded all the elements of
3592
            //    the table.
3593
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3594
                .match_full()
3595
                .into_iter();
3596
            self.data = self.data.next_n(Group::WIDTH);
3597
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3598
        }
3599
    }
3600
}
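
Neither `next_impl` nor `fold_impl` above visits buckets one at a time: each loads a whole group of control bytes, turns the full slots into a bitmask, and pops set bits until the mask is exhausted before advancing by `Group::WIDTH`. A scalar sketch of that scheme (invented names, no SIMD; the real code builds the mask with a single `Group::match_full` compare):

    const GROUP_WIDTH: usize = 8; // stand-in for Group::WIDTH

    fn full_indices(ctrl: &[u8], full_marker: u8) -> Vec<usize> {
        assert_eq!(ctrl.len() % GROUP_WIDTH, 0);
        let mut out = Vec::new();
        for (group_idx, group) in ctrl.chunks_exact(GROUP_WIDTH).enumerate() {
            // Build a bitmask of the full buckets in this group.
            let mut mask: u32 = 0;
            for (i, &byte) in group.iter().enumerate() {
                if byte == full_marker {
                    mask |= 1 << i;
                }
            }
            // Drain the mask one set bit at a time, like `BitMaskIter`.
            while mask != 0 {
                let bit = mask.trailing_zeros() as usize;
                mask &= mask - 1; // clear the lowest set bit
                out.push(group_idx * GROUP_WIDTH + bit);
            }
        }
        out
    }
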
3601
3602
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3603
// in the actual iterator implementations determine the real Send/Sync bounds.
3604
unsafe impl<T> Send for RawIterRange<T> {}
3605
unsafe impl<T> Sync for RawIterRange<T> {}
3606
3607
impl<T> Clone for RawIterRange<T> {
3608
    #[cfg_attr(feature = "inline-more", inline)]
3609
    fn clone(&self) -> Self {
3610
        Self {
3611
            data: self.data.clone(),
3612
            next_ctrl: self.next_ctrl,
3613
            current_group: self.current_group.clone(),
3614
            end: self.end,
3615
        }
3616
    }
3617
}
3618
3619
impl<T> Iterator for RawIterRange<T> {
3620
    type Item = Bucket<T>;
3621
3622
    #[cfg_attr(feature = "inline-more", inline)]
3623
    fn next(&mut self) -> Option<Bucket<T>> {
3624
        unsafe {
3625
            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true.
3626
            self.next_impl::<true>()
3627
        }
3628
    }
3629
3630
    #[inline]
3631
    fn size_hint(&self) -> (usize, Option<usize>) {
3632
        // We don't have an item count, so just guess based on the range size.
3633
        let remaining_buckets = if self.end > self.next_ctrl {
3634
            unsafe { offset_from(self.end, self.next_ctrl) }
3635
        } else {
3636
            0
3637
        };
3638
3639
        // Add a group width to include the group we are currently processing.
3640
        (0, Some(Group::WIDTH + remaining_buckets))
3641
    }
3642
}
3643
3644
impl<T> FusedIterator for RawIterRange<T> {}
3645
3646
/// Iterator which returns a raw pointer to every full bucket in the table.
3647
///
3648
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3649
/// must observe several rules when using it:
3650
/// - You must not free the hash table while iterating (including via growing/shrinking).
3651
/// - It is fine to erase a bucket that has been yielded by the iterator.
3652
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3653
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3654
/// - It is unspecified whether an element inserted after the iterator was
3655
///   created will be yielded by that iterator (unless `reflect_insert` is called).
3656
/// - The order in which the iterator yields buckets is unspecified and may
3657
///   change in the future.
3658
pub struct RawIter<T> {
3659
    pub(crate) iter: RawIterRange<T>,
3660
    items: usize,
3661
}
3662
3663
impl<T> RawIter<T> {
3664
193k
    unsafe fn drop_elements(&mut self) {
3665
193k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
193k
        }
3670
193k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)>>::drop_elements
Line
Count
Source
3664
38.7k
    unsafe fn drop_elements(&mut self) {
3665
38.7k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
38.7k
        }
3670
38.7k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)>>::drop_elements
Line
Count
Source
3664
38.7k
    unsafe fn drop_elements(&mut self) {
3665
38.7k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
38.7k
        }
3670
38.7k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)>>::drop_elements
Line
Count
Source
3664
38.7k
    unsafe fn drop_elements(&mut self) {
3665
38.7k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
38.7k
        }
3670
38.7k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)>>::drop_elements
Line
Count
Source
3664
38.7k
    unsafe fn drop_elements(&mut self) {
3665
38.7k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
38.7k
        }
3670
38.7k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)>>::drop_elements
Line
Count
Source
3664
38.7k
    unsafe fn drop_elements(&mut self) {
3665
38.7k
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
38.7k
        }
3670
38.7k
    }
3671
}
3672
3673
impl<T> Clone for RawIter<T> {
3674
    #[cfg_attr(feature = "inline-more", inline)]
3675
    fn clone(&self) -> Self {
3676
        Self {
3677
            iter: self.iter.clone(),
3678
            items: self.items,
3679
        }
3680
    }
3681
}
3682
impl<T> Default for RawIter<T> {
3683
    #[cfg_attr(feature = "inline-more", inline)]
3684
    fn default() -> Self {
3685
        // SAFETY: Because the table is static, it always outlives the iter.
3686
        unsafe { RawTableInner::NEW.iter() }
3687
    }
3688
}
3689
3690
impl<T> Iterator for RawIter<T> {
3691
    type Item = Bucket<T>;
3692
3693
    #[cfg_attr(feature = "inline-more", inline)]
3694
362k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
362k
        // Inner iterator iterates over buckets
3696
362k
        // so it can do unnecessary work if we already yielded all items.
3697
362k
        if self.items == 0 {
3698
193k
            return None;
3699
168k
        }
3700
168k
3701
168k
        let nxt = unsafe {
3702
168k
            // SAFETY: We check number of items to yield using `items` field.
3703
168k
            self.iter.next_impl::<false>()
3704
168k
        };
3705
168k
3706
168k
        debug_assert!(nxt.is_some());
3707
168k
        self.items -= 1;
3708
168k
3709
168k
        nxt
3710
362k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3694
79.4k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
79.4k
        // Inner iterator iterates over buckets
3696
79.4k
        // so it can do unnecessary work if we already yielded all items.
3697
79.4k
        if self.items == 0 {
3698
38.7k
            return None;
3699
40.6k
        }
3700
40.6k
3701
40.6k
        let nxt = unsafe {
3702
40.6k
            // SAFETY: We check number of items to yield using `items` field.
3703
40.6k
            self.iter.next_impl::<false>()
3704
40.6k
        };
3705
40.6k
3706
40.6k
        debug_assert!(nxt.is_some());
3707
40.6k
        self.items -= 1;
3708
40.6k
3709
40.6k
        nxt
3710
79.4k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3694
131k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
131k
        // Inner iterator iterates over buckets
3696
131k
        // so it can do unnecessary work if we already yielded all items.
3697
131k
        if self.items == 0 {
3698
38.7k
            return None;
3699
92.5k
        }
3700
92.5k
3701
92.5k
        let nxt = unsafe {
3702
92.5k
            // SAFETY: We check number of items to yield using `items` field.
3703
92.5k
            self.iter.next_impl::<false>()
3704
92.5k
        };
3705
92.5k
3706
92.5k
        debug_assert!(nxt.is_some());
3707
92.5k
        self.items -= 1;
3708
92.5k
3709
92.5k
        nxt
3710
131k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3694
52.9k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
52.9k
        // Inner iterator iterates over buckets
3696
52.9k
        // so it can do unnecessary work if we already yielded all items.
3697
52.9k
        if self.items == 0 {
3698
38.7k
            return None;
3699
14.2k
        }
3700
14.2k
3701
14.2k
        let nxt = unsafe {
3702
14.2k
            // SAFETY: We check number of items to yield using `items` field.
3703
14.2k
            self.iter.next_impl::<false>()
3704
14.2k
        };
3705
14.2k
3706
14.2k
        debug_assert!(nxt.is_some());
3707
14.2k
        self.items -= 1;
3708
14.2k
3709
14.2k
        nxt
3710
52.9k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3694
51.1k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
51.1k
        // Inner iterator iterates over buckets
3696
51.1k
        // so it can do unnecessary work if we already yielded all items.
3697
51.1k
        if self.items == 0 {
3698
38.7k
            return None;
3699
12.4k
        }
3700
12.4k
3701
12.4k
        let nxt = unsafe {
3702
12.4k
            // SAFETY: We check number of items to yield using `items` field.
3703
12.4k
            self.iter.next_impl::<false>()
3704
12.4k
        };
3705
12.4k
3706
12.4k
        debug_assert!(nxt.is_some());
3707
12.4k
        self.items -= 1;
3708
12.4k
3709
12.4k
        nxt
3710
51.1k
    }
<hashbrown::raw::RawIter<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3694
47.4k
    fn next(&mut self) -> Option<Bucket<T>> {
3695
47.4k
        // Inner iterator iterates over buckets
3696
47.4k
        // so it can do unnecessary work if we already yielded all items.
3697
47.4k
        if self.items == 0 {
3698
38.7k
            return None;
3699
8.65k
        }
3700
8.65k
3701
8.65k
        let nxt = unsafe {
3702
8.65k
            // SAFETY: We check number of items to yield using `items` field.
3703
8.65k
            self.iter.next_impl::<false>()
3704
8.65k
        };
3705
8.65k
3706
8.65k
        debug_assert!(nxt.is_some());
3707
8.65k
        self.items -= 1;
3708
8.65k
3709
8.65k
        nxt
3710
47.4k
    }
3711
3712
    #[inline]
3713
    fn size_hint(&self) -> (usize, Option<usize>) {
3714
        (self.items, Some(self.items))
3715
    }
3716
3717
    #[inline]
3718
    fn fold<B, F>(self, init: B, f: F) -> B
3719
    where
3720
        Self: Sized,
3721
        F: FnMut(B, Self::Item) -> B,
3722
    {
3723
        unsafe { self.iter.fold_impl(self.items, init, f) }
3724
    }
3725
}
3726
3727
impl<T> ExactSizeIterator for RawIter<T> {}
3728
impl<T> FusedIterator for RawIter<T> {}
3729
3730
/// Iterator which returns an index of every full bucket in the table.
3731
///
3732
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3733
/// must observe several rules when using it:
3734
/// - You must not free the hash table while iterating (including via growing/shrinking).
3735
/// - It is fine to erase a bucket that has been yielded by the iterator.
3736
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3737
///   result in the iterator yielding index of that bucket.
3738
/// - It is unspecified whether an element inserted after the iterator was
3739
///   created will be yielded by that iterator.
3740
/// - The order in which the iterator yields indices of the buckets is unspecified
3741
///   and may change in the future.
3742
pub(crate) struct FullBucketsIndices {
3743
    // Mask of full buckets in the current group. Bits are cleared from this
3744
    // mask as each element is processed.
3745
    current_group: BitMaskIter,
3746
3747
    // Initial value of the bytes' indices of the current group (relative
3748
    // to the start of the control bytes).
3749
    group_first_index: usize,
3750
3751
    // Pointer to the current group of control bytes,
3752
    // Must be aligned to the group size (Group::WIDTH).
3753
    ctrl: NonNull<u8>,
3754
3755
    // Number of elements in the table.
3756
    items: usize,
3757
}
3758
3759
impl FullBucketsIndices {
3760
    /// Advances the iterator and returns the next value.
3761
    ///
3762
    /// # Safety
3763
    ///
3764
    /// If any of the following conditions are violated, the result is
3765
    /// [`Undefined Behavior`]:
3766
    ///
3767
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3768
    ///   i.e. table outlives the `FullBucketsIndices`;
3769
    ///
3770
    /// * It never tries to iterate after getting all elements.
3771
    ///
3772
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3773
    #[inline(always)]
3774
0
    unsafe fn next_impl(&mut self) -> Option<usize> {
3775
        loop {
3776
0
            if let Some(index) = self.current_group.next() {
3777
                // The returned `self.group_first_index + index` will always
3778
                // be in the range `0..self.buckets()`. See explanation below.
3779
0
                return Some(self.group_first_index + index);
3780
0
            }
3781
0
3782
0
            // SAFETY: The caller of this function ensures that:
3783
0
            //
3784
0
            // 1. It never tries to iterate after getting all the elements;
3785
0
            // 2. The table is alive and has not moved;
3786
0
            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3787
0
            //
3788
0
            // Taking the above into account, we always stay within the bounds, because:
3789
0
            //
3790
0
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3791
0
            //    we will never end up in the given branch, since we should have already
3792
0
            //    yielded all the elements of the table.
3793
0
            //
3794
0
            // 2. For tables larger than the group width. The number of buckets is a
3795
0
            //    power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since
3796
0
            //    `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3797
0
            //    the start of the array of control bytes, and never try to iterate after
3798
0
            //    getting all the elements, the last `self.ctrl` will be equal to
3799
0
            //    the `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
3800
0
            //    will always contain indices within the range `0..Group::WIDTH`,
3801
0
            //    and subsequent `self.group_first_index + index` will always return a
3802
0
            //    number less than `self.buckets()`.
3803
0
            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3804
0
3805
0
            // SAFETY: See explanation above.
3806
0
            self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast())
3807
0
                .match_full()
3808
0
                .into_iter();
3809
0
            self.group_first_index += Group::WIDTH;
3810
        }
3811
0
    }
3812
}
3813
3814
impl Iterator for FullBucketsIndices {
3815
    type Item = usize;
3816
3817
    /// Advances the iterator and returns the next value. It is up to
3818
    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3819
    /// because we cannot make the `next` method unsafe.
3820
    #[inline(always)]
3821
0
    fn next(&mut self) -> Option<usize> {
3822
0
        // Return if we already yielded all items.
3823
0
        if self.items == 0 {
3824
0
            return None;
3825
0
        }
3826
0
3827
0
        let nxt = unsafe {
3828
0
            // SAFETY:
3829
0
            // 1. We check number of items to yield using `items` field.
3830
0
            // 2. The caller ensures that the table is alive and has not moved.
3831
0
            self.next_impl()
3832
0
        };
3833
0
3834
0
        debug_assert!(nxt.is_some());
3835
0
        self.items -= 1;
3836
0
3837
0
        nxt
3838
0
    }
3839
3840
    #[inline(always)]
3841
    fn size_hint(&self) -> (usize, Option<usize>) {
3842
        (self.items, Some(self.items))
3843
    }
3844
}
3845
3846
impl ExactSizeIterator for FullBucketsIndices {}
3847
impl FusedIterator for FullBucketsIndices {}
3848
3849
/// Iterator which consumes a table and returns elements.
3850
pub struct RawIntoIter<T, A: Allocator = Global> {
3851
    iter: RawIter<T>,
3852
    allocation: Option<(NonNull<u8>, Layout, A)>,
3853
    marker: PhantomData<T>,
3854
}
3855
3856
impl<T, A: Allocator> RawIntoIter<T, A> {
3857
    #[cfg_attr(feature = "inline-more", inline)]
3858
    pub fn iter(&self) -> RawIter<T> {
3859
        self.iter.clone()
3860
    }
3861
}
3862
3863
unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3864
where
3865
    T: Send,
3866
    A: Send,
3867
{
3868
}
3869
unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3870
where
3871
    T: Sync,
3872
    A: Sync,
3873
{
3874
}
3875
3876
#[cfg(feature = "nightly")]
3877
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3878
    #[cfg_attr(feature = "inline-more", inline)]
3879
    fn drop(&mut self) {
3880
        unsafe {
3881
            // Drop all remaining elements
3882
            self.iter.drop_elements();
3883
3884
            // Free the table
3885
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3886
                alloc.deallocate(ptr, layout);
3887
            }
3888
        }
3889
    }
3890
}
3891
#[cfg(not(feature = "nightly"))]
3892
impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3893
    #[cfg_attr(feature = "inline-more", inline)]
3894
    fn drop(&mut self) {
3895
        unsafe {
3896
            // Drop all remaining elements
3897
            self.iter.drop_elements();
3898
3899
            // Free the table
3900
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3901
                alloc.deallocate(ptr, layout);
3902
            }
3903
        }
3904
    }
3905
}
3906
3907
impl<T, A: Allocator> Default for RawIntoIter<T, A> {
3908
    fn default() -> Self {
3909
        Self {
3910
            iter: Default::default(),
3911
            allocation: None,
3912
            marker: PhantomData,
3913
        }
3914
    }
3915
}
3916
impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3917
    type Item = T;
3918
3919
    #[cfg_attr(feature = "inline-more", inline)]
3920
    fn next(&mut self) -> Option<T> {
3921
        unsafe { Some(self.iter.next()?.read()) }
3922
    }
3923
3924
    #[inline]
3925
    fn size_hint(&self) -> (usize, Option<usize>) {
3926
        self.iter.size_hint()
3927
    }
3928
}
3929
3930
impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3931
impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3932
3933
/// Iterator which consumes elements without freeing the table storage.
3934
pub struct RawDrain<'a, T, A: Allocator = Global> {
3935
    iter: RawIter<T>,
3936
3937
    // The table is moved into the iterator for the duration of the drain. This
3938
    // ensures that an empty table is left if the drain iterator is leaked
3939
    // without dropping.
3940
    table: RawTableInner,
3941
    orig_table: NonNull<RawTableInner>,
3942
3943
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3944
    // covariant over T.
3945
    marker: PhantomData<&'a RawTable<T, A>>,
3946
}
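
The comment on the `table` field above is the heart of `RawDrain`'s leak safety: the storage is moved into the iterator before anything is yielded, so leaking the drain can at worst leak elements; it can never expose a half-drained table. A simplified sketch of the same idea for `Vec` (invented names; unlike `RawDrain`, it does not hand the emptied allocation back on drop):

    struct DrainAll<'a, T> {
        remaining: std::vec::IntoIter<T>, // owns the moved-out elements
        _orig: &'a mut Vec<T>,            // already empty; stays borrowed while draining
    }

    fn drain_all<T>(v: &mut Vec<T>) -> DrainAll<'_, T> {
        // Move the contents out up front. If the drain is later leaked with
        // `mem::forget`, the elements leak but the source is already in a
        // consistent, empty state.
        let remaining = std::mem::take(v).into_iter();
        DrainAll { remaining, _orig: v }
    }

    impl<T> Iterator for DrainAll<'_, T> {
        type Item = T;
        fn next(&mut self) -> Option<T> {
            self.remaining.next()
        }
    }
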
3947
3948
impl<T, A: Allocator> RawDrain<'_, T, A> {
3949
    #[cfg_attr(feature = "inline-more", inline)]
3950
    pub fn iter(&self) -> RawIter<T> {
3951
        self.iter.clone()
3952
    }
3953
}
3954
3955
unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3956
where
3957
    T: Send,
3958
    A: Send,
3959
{
3960
}
3961
unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3962
where
3963
    T: Sync,
3964
    A: Sync,
3965
{
3966
}
3967
3968
impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3969
    #[cfg_attr(feature = "inline-more", inline)]
3970
193k
    fn drop(&mut self) {
3971
193k
        unsafe {
3972
193k
            // Drop all remaining elements. Note that this may panic.
3973
193k
            self.iter.drop_elements();
3974
193k
3975
193k
            // Reset the contents of the table now that all elements have been
3976
193k
            // dropped.
3977
193k
            self.table.clear_no_drop();
3978
193k
3979
193k
            // Move the now empty table back to its original location.
3980
193k
            self.orig_table
3981
193k
                .as_ptr()
3982
193k
                .copy_from_nonoverlapping(&self.table, 1);
3983
193k
        }
3984
193k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3970
38.7k
    fn drop(&mut self) {
3971
38.7k
        unsafe {
3972
38.7k
            // Drop all remaining elements. Note that this may panic.
3973
38.7k
            self.iter.drop_elements();
3974
38.7k
3975
38.7k
            // Reset the contents of the table now that all elements have been
3976
38.7k
            // dropped.
3977
38.7k
            self.table.clear_no_drop();
3978
38.7k
3979
38.7k
            // Move the now empty table back to its original location.
3980
38.7k
            self.orig_table
3981
38.7k
                .as_ptr()
3982
38.7k
                .copy_from_nonoverlapping(&self.table, 1);
3983
38.7k
        }
3984
38.7k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3970
38.7k
    fn drop(&mut self) {
3971
38.7k
        unsafe {
3972
38.7k
            // Drop all remaining elements. Note that this may panic.
3973
38.7k
            self.iter.drop_elements();
3974
38.7k
3975
38.7k
            // Reset the contents of the table now that all elements have been
3976
38.7k
            // dropped.
3977
38.7k
            self.table.clear_no_drop();
3978
38.7k
3979
38.7k
            // Move the now empty table back to its original location.
3980
38.7k
            self.orig_table
3981
38.7k
                .as_ptr()
3982
38.7k
                .copy_from_nonoverlapping(&self.table, 1);
3983
38.7k
        }
3984
38.7k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3970
38.7k
    fn drop(&mut self) {
3971
38.7k
        unsafe {
3972
38.7k
            // Drop all remaining elements. Note that this may panic.
3973
38.7k
            self.iter.drop_elements();
3974
38.7k
3975
38.7k
            // Reset the contents of the table now that all elements have been
3976
38.7k
            // dropped.
3977
38.7k
            self.table.clear_no_drop();
3978
38.7k
3979
38.7k
            // Move the now empty table back to its original location.
3980
38.7k
            self.orig_table
3981
38.7k
                .as_ptr()
3982
38.7k
                .copy_from_nonoverlapping(&self.table, 1);
3983
38.7k
        }
3984
38.7k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3970
38.7k
    fn drop(&mut self) {
3971
38.7k
        unsafe {
3972
38.7k
            // Drop all remaining elements. Note that this may panic.
3973
38.7k
            self.iter.drop_elements();
3974
38.7k
3975
38.7k
            // Reset the contents of the table now that all elements have been
3976
38.7k
            // dropped.
3977
38.7k
            self.table.clear_no_drop();
3978
38.7k
3979
38.7k
            // Move the now empty table back to its original location.
3980
38.7k
            self.orig_table
3981
38.7k
                .as_ptr()
3982
38.7k
                .copy_from_nonoverlapping(&self.table, 1);
3983
38.7k
        }
3984
38.7k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)> as core::ops::drop::Drop>::drop
Line
Count
Source
3970
38.7k
    fn drop(&mut self) {
3971
38.7k
        unsafe {
3972
38.7k
            // Drop all remaining elements. Note that this may panic.
3973
38.7k
            self.iter.drop_elements();
3974
38.7k
3975
38.7k
            // Reset the contents of the table now that all elements have been
3976
38.7k
            // dropped.
3977
38.7k
            self.table.clear_no_drop();
3978
38.7k
3979
38.7k
            // Move the now empty table back to its original location.
3980
38.7k
            self.orig_table
3981
38.7k
                .as_ptr()
3982
38.7k
                .copy_from_nonoverlapping(&self.table, 1);
3983
38.7k
        }
3984
38.7k
    }
3985
}
3986
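The Drop implementation above follows a general move-out/restore-on-Drop pattern: drop whatever the drain has not yielded, then hand the now-empty (but still allocated) storage back to its original location. A self-contained sketch of the same pattern over a plain Vec, with invented names, for illustration only:

struct MiniDrain<'a, T> {
    taken: Vec<T>,        // storage moved out of the source for the drain's lifetime
    orig: &'a mut Vec<T>, // where the emptied storage goes back on Drop
}

impl<'a, T> MiniDrain<'a, T> {
    fn new(src: &'a mut Vec<T>) -> Self {
        // Take the storage immediately and leave an empty Vec behind, so even a
        // leaked MiniDrain leaves `src` in a valid (empty) state.
        let taken = std::mem::take(&mut *src);
        MiniDrain { taken, orig: src }
    }
}

impl<T> Iterator for MiniDrain<'_, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        self.taken.pop()
    }
}

impl<T> Drop for MiniDrain<'_, T> {
    fn drop(&mut self) {
        self.taken.clear();                           // drop any elements not yet yielded
        *self.orig = std::mem::take(&mut self.taken); // hand the empty storage back
    }
}

fn main() {
    let mut v = vec![1, 2, 3, 4];
    let mut d = MiniDrain::new(&mut v);
    assert_eq!(d.next(), Some(4));
    drop(d);                    // remaining elements dropped, storage restored
    assert!(v.is_empty());
    assert!(v.capacity() >= 4); // the original allocation survived the drain
}

As with RawDrain, leaking a MiniDrain leaks the taken storage but leaves the source empty and usable, because the storage is moved out at construction time.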
3987
impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3988
    type Item = T;
3989
3990
    #[cfg_attr(feature = "inline-more", inline)]
3991
362k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
362k
            let item = self.iter.next()?;
3994
168k
            Some(item.read())
3995
        }
3996
362k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<alloc::vec::Vec<u8>>, core::ptr::non_null::NonNull<lru::LruEntry<alloc::vec::Vec<u8>, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3991
79.4k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
79.4k
            let item = self.iter.next()?;
3994
40.6k
            Some(item.read())
3995
        }
3996
79.4k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3991
131k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
131k
            let item = self.iter.next()?;
3994
92.5k
            Some(item.read())
3995
        }
3996
131k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBFileGUIDOffset>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3991
52.9k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
52.9k
            let item = self.iter.next()?;
3994
14.2k
            Some(item.read())
3995
        }
3996
52.9k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBCommonHdr>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBCommonHdr, suricata::smb::smb::SMBTree>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3991
51.1k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
51.1k
            let item = self.iter.next()?;
3994
12.4k
            Some(item.read())
3995
        }
3996
51.1k
    }
<hashbrown::raw::RawDrain<(lru::KeyRef<suricata::smb::smb::SMBHashKeyHdrGuid>, core::ptr::non_null::NonNull<lru::LruEntry<suricata::smb::smb::SMBHashKeyHdrGuid, alloc::vec::Vec<u8>>>)> as core::iter::traits::iterator::Iterator>::next
Line
Count
Source
3991
47.4k
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
47.4k
            let item = self.iter.next()?;
3994
8.65k
            Some(item.read())
3995
        }
3996
47.4k
    }
3997
3998
    #[inline]
3999
    fn size_hint(&self) -> (usize, Option<usize>) {
4000
        self.iter.size_hint()
4001
    }
4002
}
4003
4004
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4005
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
4006
4007
/// Iterator over occupied buckets that could match a given hash.
4008
///
4009
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4010
/// items that have a hash value different from the one provided. You should
4011
/// always validate the returned values before using them.
4012
///
4013
/// For maximum flexibility this iterator is not bound by a lifetime, but you
4014
/// must observe several rules when using it:
4015
/// - You must not free the hash table while iterating (including via growing/shrinking).
4016
/// - It is fine to erase a bucket that has been yielded by the iterator.
4017
/// - Erasing a bucket that has not yet been yielded by the iterator may still
4018
///   result in the iterator yielding that bucket.
4019
/// - It is unspecified whether an element inserted after the iterator was
4020
///   created will be yielded by that iterator.
4021
/// - The order in which the iterator yields buckets is unspecified and may
4022
///   change in the future.
4023
pub struct RawIterHash<T> {
4024
    inner: RawIterHashInner,
4025
    _marker: PhantomData<T>,
4026
}
4027
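Because only 7 bits of the hash survive in the control bytes, two keys whose hashes differ only in their low bits share a tag, and this iterator yields both as candidates. A hedged illustration of the idea; the tag computation below mirrors, but is not, hashbrown's `Tag::full`:

// Illustrative only: derive a 7-bit tag from the top bits of a 64-bit hash.
fn tag7(hash: u64) -> u8 {
    ((hash >> 57) & 0x7f) as u8
}

fn main() {
    let a: u64 = 0x0123_4567_89ab_cdef;
    let b: u64 = a ^ 0x0000_0000_ffff_ffff; // same top bits, different low bits
    assert_ne!(a, b);
    assert_eq!(tag7(a), tag7(b)); // identical tags: both buckets must be re-checked
}

This is why callers must validate key equality on every bucket the iterator returns.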
4028
#[derive(Clone)]
4029
struct RawIterHashInner {
4030
    // See `RawTableInner`'s corresponding fields for details.
4031
    // We can't store a `*const RawTableInner` as it would get
4032
    // invalidated by the user calling `&mut` methods on `RawTable`.
4033
    bucket_mask: usize,
4034
    ctrl: NonNull<u8>,
4035
4036
    // The top 7 bits of the hash.
4037
    tag_hash: Tag,
4038
4039
    // The sequence of groups to probe in the search.
4040
    probe_seq: ProbeSeq,
4041
4042
    group: Group,
4043
4044
    // The elements within the group with a matching tag-hash.
4045
    bitmask: BitMaskIter,
4046
}
4047
4048
impl<T> RawIterHash<T> {
4049
    #[cfg_attr(feature = "inline-more", inline)]
4050
    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4051
        RawIterHash {
4052
            inner: RawIterHashInner::new(&table.table, hash),
4053
            _marker: PhantomData,
4054
        }
4055
    }
4056
}
4057
4058
impl<T> Clone for RawIterHash<T> {
4059
    #[cfg_attr(feature = "inline-more", inline)]
4060
    fn clone(&self) -> Self {
4061
        Self {
4062
            inner: self.inner.clone(),
4063
            _marker: PhantomData,
4064
        }
4065
    }
4066
}
4067
4068
impl<T> Default for RawIterHash<T> {
4069
    #[cfg_attr(feature = "inline-more", inline)]
4070
    fn default() -> Self {
4071
        Self {
4072
            // SAFETY: Because the table is static, it always outlives the iter.
4073
            inner: unsafe { RawIterHashInner::new(&RawTableInner::NEW, 0) },
4074
            _marker: PhantomData,
4075
        }
4076
    }
4077
}
4078
4079
impl RawIterHashInner {
4080
    #[cfg_attr(feature = "inline-more", inline)]
4081
    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4082
        let tag_hash = Tag::full(hash);
4083
        let probe_seq = table.probe_seq(hash);
4084
        let group = Group::load(table.ctrl(probe_seq.pos));
4085
        let bitmask = group.match_tag(tag_hash).into_iter();
4086
4087
        RawIterHashInner {
4088
            bucket_mask: table.bucket_mask,
4089
            ctrl: table.ctrl,
4090
            tag_hash,
4091
            probe_seq,
4092
            group,
4093
            bitmask,
4094
        }
4095
    }
4096
}
4097
4098
impl<T> Iterator for RawIterHash<T> {
4099
    type Item = Bucket<T>;
4100
4101
    fn next(&mut self) -> Option<Bucket<T>> {
4102
        unsafe {
4103
            match self.inner.next() {
4104
                Some(index) => {
4105
                    // Can't use `RawTable::bucket` here as we don't have
4106
                    // an actual `RawTable` reference to use.
4107
                    debug_assert!(index <= self.inner.bucket_mask);
4108
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4109
                    Some(bucket)
4110
                }
4111
                None => None,
4112
            }
4113
        }
4114
    }
4115
}
4116
4117
impl Iterator for RawIterHashInner {
4118
    type Item = usize;
4119
4120
    fn next(&mut self) -> Option<Self::Item> {
4121
        unsafe {
4122
            loop {
4123
                if let Some(bit) = self.bitmask.next() {
4124
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4125
                    return Some(index);
4126
                }
4127
                if likely(self.group.match_empty().any_bit_set()) {
4128
                    return None;
4129
                }
4130
                self.probe_seq.move_next(self.bucket_mask);
4131
4132
                // Can't use `RawTableInner::ctrl` here as we don't have
4133
                // an actual `RawTableInner` reference to use.
4134
                let index = self.probe_seq.pos;
4135
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4136
                let group_ctrl = self.ctrl.as_ptr().add(index).cast();
4137
4138
                self.group = Group::load(group_ctrl);
4139
                self.bitmask = self.group.match_tag(self.tag_hash).into_iter();
4140
            }
4141
        }
4142
    }
4143
}
4144
4145
pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4146
    pub iter: RawIter<T>,
4147
    pub table: &'a mut RawTable<T, A>,
4148
}
4149
4150
impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4151
    #[cfg_attr(feature = "inline-more", inline)]
4152
    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4153
    where
4154
        F: FnMut(&mut T) -> bool,
4155
    {
4156
        unsafe {
4157
            for item in &mut self.iter {
4158
                if f(item.as_mut()) {
4159
                    return Some(self.table.remove(item).0);
4160
                }
4161
            }
4162
        }
4163
        None
4164
    }
4165
}
4166
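RawExtractIf::next is the engine behind the map-level `extract_if` adaptor. A usage-level sketch, assuming hashbrown's `HashMap::extract_if` (the predicate receives `&K, &mut V` and matching entries are removed and returned):

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = (0..8).map(|i| (i, i * 10)).collect();

    // Remove (and take ownership of) every entry with an even key;
    // entries for which the predicate returns false stay in the map.
    let evens: Vec<(u32, u32)> = map.extract_if(|k, _v| k % 2 == 0).collect();

    assert_eq!(evens.len(), 4);
    assert_eq!(map.len(), 4);
}

Unlike a drain, entries the predicate rejects (or that are never reached because the iterator is dropped early) remain in the map, which matches `next` above removing only the items the closure accepted.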
4167
#[cfg(test)]
4168
mod test_map {
4169
    use super::*;
4170
4171
    #[test]
4172
    fn test_minimum_capacity_for_small_types() {
4173
        #[track_caller]
4174
        fn test_t<T>() {
4175
            let raw_table: RawTable<T> = RawTable::with_capacity(1);
4176
            let actual_buckets = raw_table.buckets();
4177
            let min_buckets = Group::WIDTH / core::mem::size_of::<T>();
4178
            assert!(
4179
                actual_buckets >= min_buckets,
4180
                "expected at least {min_buckets} buckets, got {actual_buckets} buckets"
4181
            );
4182
        }
4183
4184
        test_t::<u8>();
4185
4186
        // This is only "small" for some platforms, like x86_64 with SSE2, but
4187
        // there's no harm in running it on other platforms.
4188
        test_t::<u16>();
4189
    }
4190
4191
    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4192
        unsafe {
4193
            table.table.rehash_in_place(
4194
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4195
                mem::size_of::<T>(),
4196
                if mem::needs_drop::<T>() {
4197
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
4198
                } else {
4199
                    None
4200
                },
4201
            );
4202
        }
4203
    }
4204
4205
    #[test]
4206
    fn rehash() {
4207
        let mut table = RawTable::new();
4208
        let hasher = |i: &u64| *i;
4209
        for i in 0..100 {
4210
            table.insert(i, i, hasher);
4211
        }
4212
4213
        for i in 0..100 {
4214
            unsafe {
4215
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4216
            }
4217
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4218
        }
4219
4220
        rehash_in_place(&mut table, hasher);
4221
4222
        for i in 0..100 {
4223
            unsafe {
4224
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4225
            }
4226
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4227
        }
4228
    }
4229
4230
    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4231
    /// AN UNINITIALIZED TABLE DURING THE DROP
4232
    #[test]
4233
    fn test_drop_uninitialized() {
4234
        use ::alloc::vec::Vec;
4235
4236
        let table = unsafe {
4237
            // SAFETY: `buckets` is a power of two and we're not
4238
            // trying to actually use the returned RawTable.
4239
            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4240
                .unwrap()
4241
        };
4242
        drop(table);
4243
    }
4244
4245
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4246
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4247
    #[test]
4248
    fn test_drop_zero_items() {
4249
        use ::alloc::vec::Vec;
4250
        unsafe {
4251
            // SAFETY: `buckets` is a power of two and we're not
4252
            // trying to actually use the returned RawTable.
4253
            let mut table =
4254
                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4255
                    .unwrap();
4256
4257
            // WE SIMULATE A FULL TABLE.
4258
4259
            // SAFETY: We checked that the table is allocated and therefore the table already has
4260
            // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
4261
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4262
            table.table.ctrl_slice().fill_empty();
4263
4264
            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4265
            table.table.ctrl(0).write_bytes(0, table.capacity());
4266
4267
            // Fix up the trailing control bytes. See the comments in set_ctrl
4268
            // for the handling of tables smaller than the group width.
4269
            if table.buckets() < Group::WIDTH {
4270
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
4271
                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4272
                // `Group::WIDTH` is safe
4273
                table
4274
                    .table
4275
                    .ctrl(0)
4276
                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4277
            } else {
4278
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
4279
                // control bytes, so copying `Group::WIDTH` bytes with offset equal
4280
                // to `self.buckets() == self.bucket_mask + 1` is safe
4281
                table
4282
                    .table
4283
                    .ctrl(0)
4284
                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4285
            }
4286
            drop(table);
4287
        }
4288
    }
4289
4290
    /// CHECKING THAT A PANIC IN `clone` DURING `clone_from` DOES NOT
4291
    /// DOUBLE-DROP OR LEAK ELEMENTS AND LEAVES THE TABLE EMPTY.
4292
    #[test]
4293
    fn test_catch_panic_clone_from() {
4294
        use super::{AllocError, Allocator, Global};
4295
        use ::alloc::sync::Arc;
4296
        use ::alloc::vec::Vec;
4297
        use core::sync::atomic::{AtomicI8, Ordering};
4298
        use std::thread;
4299
4300
        struct MyAllocInner {
4301
            drop_count: Arc<AtomicI8>,
4302
        }
4303
4304
        #[derive(Clone)]
4305
        struct MyAlloc {
4306
            _inner: Arc<MyAllocInner>,
4307
        }
4308
4309
        impl Drop for MyAllocInner {
4310
            fn drop(&mut self) {
4311
                println!("MyAlloc freed.");
4312
                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4313
            }
4314
        }
4315
4316
        unsafe impl Allocator for MyAlloc {
4317
            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4318
                let g = Global;
4319
                g.allocate(layout)
4320
            }
4321
4322
            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4323
                let g = Global;
4324
                g.deallocate(ptr, layout)
4325
            }
4326
        }
4327
4328
        const DISARMED: bool = false;
4329
        const ARMED: bool = true;
4330
4331
        struct CheckedCloneDrop {
4332
            panic_in_clone: bool,
4333
            dropped: bool,
4334
            need_drop: Vec<u64>,
4335
        }
4336
4337
        impl Clone for CheckedCloneDrop {
4338
            fn clone(&self) -> Self {
4339
                if self.panic_in_clone {
4340
                    panic!("panic in clone")
4341
                }
4342
                Self {
4343
                    panic_in_clone: self.panic_in_clone,
4344
                    dropped: self.dropped,
4345
                    need_drop: self.need_drop.clone(),
4346
                }
4347
            }
4348
        }
4349
4350
        impl Drop for CheckedCloneDrop {
4351
            fn drop(&mut self) {
4352
                if self.dropped {
4353
                    panic!("double drop");
4354
                }
4355
                self.dropped = true;
4356
            }
4357
        }
4358
4359
        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4360
4361
        let mut table = RawTable::new_in(MyAlloc {
4362
            _inner: Arc::new(MyAllocInner {
4363
                drop_count: dropped.clone(),
4364
            }),
4365
        });
4366
4367
        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4368
            let idx = idx as u64;
4369
            table.insert(
4370
                idx,
4371
                (
4372
                    idx,
4373
                    CheckedCloneDrop {
4374
                        panic_in_clone,
4375
                        dropped: false,
4376
                        need_drop: vec![idx],
4377
                    },
4378
                ),
4379
                |(k, _)| *k,
4380
            );
4381
        }
4382
4383
        assert_eq!(table.len(), 7);
4384
4385
        thread::scope(|s| {
4386
            let result = s.spawn(|| {
4387
                let armed_flags = [
4388
                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4389
                ];
4390
                let mut scope_table = RawTable::new_in(MyAlloc {
4391
                    _inner: Arc::new(MyAllocInner {
4392
                        drop_count: dropped.clone(),
4393
                    }),
4394
                });
4395
                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4396
                    let idx = idx as u64;
4397
                    scope_table.insert(
4398
                        idx,
4399
                        (
4400
                            idx,
4401
                            CheckedCloneDrop {
4402
                                panic_in_clone,
4403
                                dropped: false,
4404
                                need_drop: vec![idx + 100],
4405
                            },
4406
                        ),
4407
                        |(k, _)| *k,
4408
                    );
4409
                }
4410
                table.clone_from(&scope_table);
4411
            });
4412
            assert!(result.join().is_err());
4413
        });
4414
4415
        // Let's check that all iterators work fine and do not return elements
4416
        // (especially `RawIterRange`, which does not depend on the number of
4417
        // elements in the table, but looks directly at the control bytes)
4418
        //
4419
        // SAFETY: We know for sure that `RawTable` will outlive
4420
        // the returned `RawIter / RawIterRange` iterator.
4421
        assert_eq!(table.len(), 0);
4422
        assert_eq!(unsafe { table.iter().count() }, 0);
4423
        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4424
4425
        for idx in 0..table.buckets() {
4426
            let idx = idx as u64;
4427
            assert!(
4428
                table.find(idx, |(k, _)| *k == idx).is_none(),
4429
                "Index: {idx}"
4430
            );
4431
        }
4432
4433
        // Exactly one allocator has been dropped so far; `table` is still alive
        // and keeps the other one.
4434
        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4435
    }
4436
}