Coverage Report

Created: 2025-12-14 06:59

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/hashbrown-0.16.1/src/raw/mod.rs
 Line|  Count|Source
    1|       |use crate::alloc::alloc::{handle_alloc_error, Layout};
    2|       |use crate::control::{BitMaskIter, Group, Tag, TagSliceExt};
    3|       |use crate::scopeguard::{guard, ScopeGuard};
    4|       |use crate::util::{invalid_mut, likely, unlikely};
    5|       |use crate::TryReserveError;
    6|       |use core::array;
    7|       |use core::iter::FusedIterator;
    8|       |use core::marker::PhantomData;
    9|       |use core::mem;
   10|       |use core::ptr::NonNull;
   11|       |use core::slice;
   12|       |use core::{hint, ptr};
   13|       |
   14|       |mod alloc;
   15|       |#[cfg(test)]
   16|       |pub(crate) use self::alloc::AllocError;
   17|       |pub(crate) use self::alloc::{do_alloc, Allocator, Global};
   18|       |
   19|       |#[inline]
   20|   507k|unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
   21|   507k|    to.offset_from(from) as usize
   22|   507k|}
Executed instantiation: hashbrown::raw::offset_from::<usize> (counts identical to the merged table above)
Unexecuted instantiation: hashbrown::raw::offset_from::<_>
   23|       |
   24|       |/// Whether memory allocation errors should return an error or abort.
   25|       |#[derive(Copy, Clone)]
   26|       |enum Fallibility {
   27|       |    Fallible,
   28|       |    Infallible,
   29|       |}
   30|       |
   31|       |impl Fallibility {
   32|       |    /// Error to return on capacity overflow.
   33|       |    #[cfg_attr(feature = "inline-more", inline)]
   34|      0|    fn capacity_overflow(self) -> TryReserveError {
   35|      0|        match self {
   36|      0|            Fallibility::Fallible => TryReserveError::CapacityOverflow,
   37|      0|            Fallibility::Infallible => panic!("Hash table capacity overflow"),
   38|       |        }
   39|      0|    }
   40|       |
   41|       |    /// Error to return on allocation error.
   42|       |    #[cfg_attr(feature = "inline-more", inline)]
   43|      0|    fn alloc_err(self, layout: Layout) -> TryReserveError {
   44|      0|        match self {
   45|      0|            Fallibility::Fallible => TryReserveError::AllocError { layout },
   46|      0|            Fallibility::Infallible => handle_alloc_error(layout),
   47|       |        }
   48|      0|    }
   49|       |}
   50|       |
   51|       |trait SizedTypeProperties: Sized {
   52|       |    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
   53|       |    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
   54|       |}
   55|       |
   56|       |impl<T> SizedTypeProperties for T {}
   57|       |
   58|       |/// Primary hash function, used to select the initial bucket to probe from.
   59|       |#[inline]
   60|       |#[allow(clippy::cast_possible_truncation)]
   61|  2.36M|fn h1(hash: u64) -> usize {
   62|       |    // On 32-bit platforms we simply ignore the higher hash bits.
   63|  2.36M|    hash as usize
   64|  2.36M|}
Executed instantiation: hashbrown::raw::h1 (counts identical to the merged table above)
Unexecuted instantiation: hashbrown::raw::h1
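The probe start is later computed as `h1(hash) & bucket_mask`, which for a power-of-two bucket count equals `hash as usize % buckets`. A minimal standalone sketch of that equivalence (the helper below mirrors `h1` but is otherwise illustrative, not part of hashbrown's API):

    fn h1(hash: u64) -> usize {
        // Higher hash bits are simply truncated on 32-bit platforms.
        hash as usize
    }

    fn main() {
        let buckets = 16usize; // must be a power of two
        let bucket_mask = buckets - 1;
        for hash in [0u64, 1, 17, 0xDEAD_BEEF, u64::MAX] {
            // Masking equals the remainder when the divisor is a power of two.
            assert_eq!(h1(hash) & bucket_mask, h1(hash) % buckets);
        }
    }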
   65|       |
   66|       |/// Probe sequence based on triangular numbers, which is guaranteed (since our
   67|       |/// table size is a power of two) to visit every group of elements exactly once.
   68|       |///
   69|       |/// A triangular probe has us jump by 1 more group every time. So first we
   70|       |/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
   71|       |/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
   72|       |///
   73|       |/// Proof that the probe will visit every group in the table:
   74|       |/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
   75|       |#[derive(Clone)]
   76|       |struct ProbeSeq {
   77|       |    pos: usize,
   78|       |    stride: usize,
   79|       |}
   80|       |
   81|       |impl ProbeSeq {
   82|       |    #[inline]
   83|  89.0k|    fn move_next(&mut self, bucket_mask: usize) {
   84|       |        // We should have found an empty bucket by now and ended the probe.
   85|  89.0k|        debug_assert!(
   86|      0|            self.stride <= bucket_mask,
   87|      0|            "Went past end of probe sequence"
   88|       |        );
   89|       |
   90|  89.0k|        self.stride += Group::WIDTH;
   91|  89.0k|        self.pos += self.stride;
   92|  89.0k|        self.pos &= bucket_mask;
   93|  89.0k|    }
Executed instantiation: <hashbrown::raw::ProbeSeq>::move_next (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
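The triangular-probing guarantee cited above (every group visited exactly once when the table size is a power of two) can be checked exhaustively for small sizes. A minimal standalone sketch, using a group width of 1 for simplicity (hashbrown strides in units of `Group::WIDTH`, which preserves the same argument):

    fn main() {
        for shift in 0..10 {
            let buckets = 1usize << shift;
            let bucket_mask = buckets - 1;
            let mut pos = 0usize;
            let mut stride = 0usize;
            let mut seen = vec![false; buckets];
            for _ in 0..buckets {
                assert!(!seen[pos], "position visited twice");
                seen[pos] = true;
                // This is `move_next` with Group::WIDTH == 1.
                stride += 1;
                pos += stride;
                pos &= bucket_mask;
            }
            assert!(seen.iter().all(|&v| v), "some position never visited");
        }
    }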
   94|       |}
   95|       |
   96|       |/// Returns the number of buckets needed to hold the given number of items,
   97|       |/// taking the maximum load factor into account.
   98|       |///
   99|       |/// Returns `None` if an overflow occurs.
  100|       |///
  101|       |/// This ensures that `buckets * table_layout.size >= table_layout.ctrl_align`.
  102|       |// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
  103|       |#[cfg_attr(target_os = "emscripten", inline(never))]
  104|       |#[cfg_attr(not(target_os = "emscripten"), inline)]
  105|  27.0k|fn capacity_to_buckets(cap: usize, table_layout: TableLayout) -> Option<usize> {
  106|  27.0k|    debug_assert_ne!(cap, 0);
  107|       |
  108|       |    // For small tables we require at least 1 empty bucket so that lookups are
  109|       |    // guaranteed to terminate if an element doesn't exist in the table.
  110|  27.0k|    if cap < 15 {
  111|       |        // Consider a small TableLayout like { size: 1, ctrl_align: 16 } on a
  112|       |        // platform with Group::WIDTH of 16 (like x86_64 with SSE2). For small
  113|       |        // bucket sizes, this ends up wasting quite a few bytes just to pad to
  114|       |        // the relatively larger ctrl_align:
  115|       |        //
  116|       |        // | capacity | buckets | bytes allocated | bytes per item |
  117|       |        // | -------- | ------- | --------------- | -------------- |
  118|       |        // |        3 |       4 |              36 | (Yikes!)  12.0 |
  119|       |        // |        7 |       8 |              40 | (Poor)     5.7 |
  120|       |        // |       14 |      16 |              48 |            3.4 |
  121|       |        // |       28 |      32 |              80 |            3.3 |
  122|       |        //
  123|       |        // In general, buckets * table_layout.size >= table_layout.ctrl_align
  124|       |        // must be true to avoid these edges. This is implemented by adjusting
  125|       |        // the minimum capacity upwards for small items. This code only needs
  126|       |        // to handle ctrl_align which are less than or equal to Group::WIDTH,
  127|       |        // because valid layout sizes are always a multiple of the alignment,
  128|       |        // so anything with alignment over the Group::WIDTH won't hit this edge
  129|       |        // case.
  130|       |
  131|       |        // This is brittle, e.g. if we ever add 32 byte groups, it will select
  132|       |        // 3 regardless of the table_layout.size.
  133|  13.7k|        let min_cap = match (Group::WIDTH, table_layout.size) {
  134|  13.7k|            (16, 0..=1) => 14,
  135|  13.7k|            (16, 2..=3) => 7,
  136|      0|            (8, 0..=1) => 7,
  137|  13.7k|            _ => 3,
  138|       |        };
  139|  13.7k|        let cap = min_cap.max(cap);
  140|       |        // We don't bother with a table size of 2 buckets since that can only
  141|       |        // hold a single element. Instead, we skip directly to a 4 bucket table
  142|       |        // which can hold 3 elements.
  143|  13.7k|        let buckets = if cap < 4 {
  144|  6.43k|            4
  145|  7.35k|        } else if cap < 8 {
  146|  3.83k|            8
  147|       |        } else {
  148|  3.52k|            16
  149|       |        };
  150|  13.7k|        ensure_bucket_bytes_at_least_ctrl_align(table_layout, buckets);
  151|  13.7k|        return Some(buckets);
  152|  13.2k|    }
  153|       |
  154|       |    // Otherwise require 1/8 buckets to be empty (87.5% load)
  155|       |    //
  156|       |    // Be careful when modifying this, calculate_layout relies on the
  157|       |    // overflow check here.
  158|  13.2k|    let adjusted_cap = cap.checked_mul(8)? / 7;
  159|       |
  160|       |    // Any overflows will have been caught by the checked_mul. Also, any
  161|       |    // rounding errors from the division above will be cleaned up by
  162|       |    // next_power_of_two (which can't overflow because of the previous division).
  163|  13.2k|    let buckets = adjusted_cap.next_power_of_two();
  164|  13.2k|    ensure_bucket_bytes_at_least_ctrl_align(table_layout, buckets);
  165|  13.2k|    Some(buckets)
  166|  27.0k|}
Executed instantiation: hashbrown::raw::capacity_to_buckets (counts identical to the merged table above)
Unexecuted instantiation: hashbrown::raw::capacity_to_buckets
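A worked example of the large-table branch above: the requested capacity is scaled by 8/7 so that 1/8 of the buckets stay empty (87.5% maximum load), then rounded up to a power of two. A standalone sketch of just that arithmetic (the function name is illustrative):

    fn capacity_to_buckets_large(cap: usize) -> Option<usize> {
        // Require 1/8 of buckets to be empty (87.5% max load).
        let adjusted_cap = cap.checked_mul(8)? / 7;
        // Rounding up to a power of two cleans up the division error.
        Some(adjusted_cap.next_power_of_two())
    }

    fn main() {
        // cap = 100: 100 * 8 / 7 = 114, next power of two is 128.
        assert_eq!(capacity_to_buckets_large(100), Some(128));
        // cap = 15: 15 * 8 / 7 = 17, next power of two is 32.
        assert_eq!(capacity_to_buckets_large(15), Some(32));
        // cap = 112: 112 * 8 / 7 = 128 exactly, already a power of two.
        assert_eq!(capacity_to_buckets_large(112), Some(128));
        // Overflow is reported rather than wrapping.
        assert_eq!(capacity_to_buckets_large(usize::MAX), None);
    }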
  167|       |
  168|       |// `maximum_buckets_in` relies on the property that for non-ZST `T`, any
  169|       |// chosen `buckets` will satisfy `buckets * table_layout.size >=
  170|       |// table_layout.ctrl_align`, so `calculate_layout_for` does not need to add
  171|       |// extra padding beyond `table_layout.size * buckets`. If small-table bucket
  172|       |// selection or growth policy changes, revisit `maximum_buckets_in`.
  173|       |#[inline]
  174|  27.0k|fn ensure_bucket_bytes_at_least_ctrl_align(table_layout: TableLayout, buckets: usize) {
  175|  27.0k|    if table_layout.size != 0 {
  176|  27.0k|        let prod = table_layout.size.saturating_mul(buckets);
  177|  27.0k|        debug_assert!(prod >= table_layout.ctrl_align);
  178|      0|    }
  179|  27.0k|}
Executed instantiation: hashbrown::raw::ensure_bucket_bytes_at_least_ctrl_align (counts identical to the merged table above)
Unexecuted instantiation: hashbrown::raw::ensure_bucket_bytes_at_least_ctrl_align
  180|       |
  181|       |/// Returns the maximum effective capacity for the given bucket mask, taking
  182|       |/// the maximum load factor into account.
  183|       |#[inline]
  184|  54.0k|fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
  185|  54.0k|    if bucket_mask < 8 {
  186|       |        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
  187|       |        // Keep in mind that the bucket mask is one less than the bucket count.
  188|  24.0k|        bucket_mask
  189|       |    } else {
  190|       |        // For larger tables we reserve 12.5% of the slots as empty.
  191|  29.9k|        ((bucket_mask + 1) / 8) * 7
  192|       |    }
  193|  54.0k|}
Executed instantiation: hashbrown::raw::bucket_mask_to_capacity (counts identical to the merged table above)
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
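The inverse mapping, sketched standalone: tables of up to 8 buckets reserve exactly one empty slot (capacity = mask = buckets - 1), while larger tables reserve 1/8 of their buckets. Note how it pairs with `capacity_to_buckets`: a table built for capacity 15 gets 32 buckets, whose mask (31) maps back to an effective capacity of 28 >= 15.

    fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
        if bucket_mask < 8 {
            bucket_mask // 1/2/4/8 buckets: keep one slot empty
        } else {
            ((bucket_mask + 1) / 8) * 7 // keep 12.5% of slots empty
        }
    }

    fn main() {
        assert_eq!(bucket_mask_to_capacity(3), 3); // 4 buckets hold 3 items
        assert_eq!(bucket_mask_to_capacity(7), 7); // 8 buckets hold 7 items
        assert_eq!(bucket_mask_to_capacity(15), 14); // 16 buckets hold 14 items
        assert_eq!(bucket_mask_to_capacity(31), 28); // 32 buckets hold 28 items
    }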
  194|       |
  195|       |/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T`
  196|       |/// while keeping the rest of `calculate_layout_for` independent of `T`
  197|       |#[derive(Copy, Clone)]
  198|       |struct TableLayout {
  199|       |    size: usize,
  200|       |    ctrl_align: usize,
  201|       |}
  202|       |
  203|       |impl TableLayout {
  204|       |    #[inline]
  205|      0|    const fn new<T>() -> Self {
  206|      0|        let layout = Layout::new::<T>();
  207|       |        Self {
  208|      0|            size: layout.size(),
  209|      0|            ctrl_align: if layout.align() > Group::WIDTH {
  210|      0|                layout.align()
  211|       |            } else {
  212|      0|                Group::WIDTH
  213|       |            },
  214|       |        }
  215|      0|    }
  216|       |
  217|       |    #[inline]
  218|  54.0k|    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
  219|  54.0k|        debug_assert!(buckets.is_power_of_two());
  220|       |
  221|  54.0k|        let TableLayout { size, ctrl_align } = self;
  222|       |        // Manual layout calculation since Layout methods are not yet stable.
  223|  54.0k|        let ctrl_offset =
  224|  54.0k|            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
  225|  54.0k|        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
  226|       |
  227|       |        // We need an additional check to ensure that the allocation doesn't
  228|       |        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
  229|  54.0k|        if len > isize::MAX as usize - (ctrl_align - 1) {
  230|      0|            return None;
  231|  54.0k|        }
  232|       |
  233|  54.0k|        Some((
  234|  54.0k|            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
  235|  54.0k|            ctrl_offset,
  236|  54.0k|        ))
  237|  54.0k|    }
Executed instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
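A worked instance of the layout arithmetic in `calculate_layout_for`, assuming `size_of::<T>() == 8`, a `Group::WIDTH` of 16 (x86_64 with SSE2, so `ctrl_align == 16`), and 16 buckets; the variable names are illustrative:

    fn main() {
        let size = 8usize;        // size_of::<T>()
        let ctrl_align = 16usize; // max(align_of::<T>(), Group::WIDTH)
        let group_width = 16usize;
        let buckets = 16usize;

        // Offset of the control bytes: the data array, rounded up to
        // ctrl_align. 8 * 16 = 128, which is already 16-aligned.
        let ctrl_offset = (size * buckets + (ctrl_align - 1)) & !(ctrl_align - 1);
        assert_eq!(ctrl_offset, 128);

        // Total allocation: data, plus `buckets` control bytes, plus
        // Group::WIDTH trailing mirror bytes. 128 + 16 + 16 = 160.
        let len = ctrl_offset + buckets + group_width;
        assert_eq!(len, 160);
        println!("ctrl_offset = {ctrl_offset}, allocation = {len} bytes");
    }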
  238|       |}
  239|       |
  240|       |/// A reference to a hash table bucket containing a `T`.
  241|       |///
  242|       |/// This is usually just a pointer to the element itself. However if the element
  243|       |/// is a ZST, then we instead track the index of the element in the table so
  244|       |/// that `erase` works properly.
  245|       |pub struct Bucket<T> {
  246|       |    // Actually this is a pointer to one past the element, not to the element
  247|       |    // itself; this is needed to maintain pointer arithmetic invariants, since
  248|       |    // keeping a direct pointer to the element introduces difficulty.
  249|       |    // `NonNull` is used for variance and niche layout.
  250|       |    ptr: NonNull<T>,
  251|       |}
  252|       |
  253|       |// This Send impl is needed for rayon support. This is safe since Bucket is
  254|       |// never exposed in a public API.
  255|       |unsafe impl<T> Send for Bucket<T> {}
  256|       |
  257|       |impl<T> Clone for Bucket<T> {
  258|       |    #[inline]
  259|      0|    fn clone(&self) -> Self {
  260|      0|        Self { ptr: self.ptr }
  261|      0|    }
  262|       |}
  263|       |
  264|       |impl<T> Bucket<T> {
  265|       |    /// Creates a [`Bucket`] that contains a pointer to the data.
  266|       |    /// The pointer calculation is performed by calculating the
  267|       |    /// offset from the given `base` pointer (convenience for
  268|       |    /// `base.as_ptr().sub(index)`).
  269|       |    ///
  270|       |    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
  271|       |    /// offset of `3 * size_of::<T>()` bytes.
  272|       |    ///
  273|       |    /// If `T` is a ZST, then we instead track the index of the element
  274|       |    /// in the table so that `erase` works properly (return
  275|       |    /// `NonNull::new_unchecked((index + 1) as *mut T)`).
  276|       |    ///
  277|       |    /// # Safety
  278|       |    ///
  279|       |    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
  280|       |    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the
  281|       |    /// safety rules of the [`NonNull::new_unchecked`] function.
  282|       |    ///
  283|       |    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
  284|       |    /// and the [`NonNull::new_unchecked`] function, as well as for the correct
  285|       |    /// logic of the work of this crate, the following rules are necessary and
  286|       |    /// sufficient:
  287|       |    ///
  288|       |    /// * the `base` pointer must not be `dangling` and must point to the
  289|       |    ///   end of the first `value element` from the `data part` of the table, i.e.
  290|       |    ///   must be the pointer returned by [`RawTable::data_end`] or by
  291|       |    ///   [`RawTableInner::data_end<T>`];
  292|       |    ///
  293|       |    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
  294|       |    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
  295|       |    ///   must be no greater than the number returned by the function
  296|       |    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
  297|       |    ///
  298|       |    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
  299|       |    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
  300|       |    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
  301|       |    /// must be no greater than the number returned by the function
  302|       |    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
  303|       |    ///
  304|       |    /// [`Bucket`]: crate::raw::Bucket
  305|       |    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
  306|       |    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
  307|       |    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
  308|       |    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
  309|       |    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
  310|       |    /// [`RawTableInner::buckets`]: RawTableInner::buckets
  311|       |    #[inline]
  312|  3.20M|    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
  313|       |        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
  314|       |        // the data part of the table (we start counting from "0", so that
  315|       |        // in the expression T[last], the "last" index is actually one less than the
  316|       |        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
  317|       |        //
  318|       |        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
  319|       |        //                   points here in the data part of the table
  320|       |        //                   (to the start of T1)
  321|       |        //                        |
  322|       |        //                        |        `base: NonNull<T>` must point here
  323|       |        //                        |         (to the end of T0 or to the start of C0)
  324|       |        //                        v         v
  325|       |        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
  326|       |        //                           ^
  327|       |        //                           `from_base_index(base, 1)` returns a pointer
  328|       |        //                           that points here in the data part of the table
  329|       |        //                           (to the end of T1)
  330|       |        //
  331|       |        // where: T0...Tlast - our stored data; C0...Clast - control bytes
  332|       |        // or metadata for data.
  333|  3.20M|        let ptr = if T::IS_ZERO_SIZED {
  334|       |            // won't overflow because index must be less than length (bucket_mask)
  335|       |            // and bucket_mask is guaranteed to be less than `isize::MAX`
  336|       |            // (see TableLayout::calculate_layout_for method)
  337|      0|            invalid_mut(index + 1)
  338|       |        } else {
  339|  3.20M|            base.as_ptr().sub(index)
  340|       |        };
  341|  3.20M|        Self {
  342|  3.20M|            ptr: NonNull::new_unchecked(ptr),
  343|  3.20M|        }
  344|  3.20M|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::from_base_index (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::from_base_index
  345|       |
  346|       |    /// Calculates the index of a [`Bucket`] as the distance between two pointers
  347|       |    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
  348|       |    /// The returned value is in units of T: the distance in bytes divided by
  349|       |    /// [`core::mem::size_of::<T>()`].
  350|       |    ///
  351|       |    /// If `T` is a ZST, then we return the index of the element in
  352|       |    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
  353|       |    ///
  354|       |    /// This function is the inverse of [`from_base_index`].
  355|       |    ///
  356|       |    /// # Safety
  357|       |    ///
  358|       |    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
  359|       |    /// from the safety rules for the [`<*const T>::offset_from`] method of `*const T`.
  360|       |    ///
  361|       |    /// Thus, in order to uphold the safety contracts for the [`<*const T>::offset_from`]
  362|       |    /// method, as well as for the correct logic of the work of this crate, the
  363|       |    /// following rules are necessary and sufficient:
  364|       |    ///
  365|       |    /// * the pointer contained in `base` must not be `dangling` and must point to the
  366|       |    ///   end of the first `element` from the `data part` of the table, i.e.
  367|       |    ///   must be a pointer returned by [`RawTable::data_end`] or by
  368|       |    ///   [`RawTableInner::data_end<T>`];
  369|       |    ///
  370|       |    /// * `self` also must not contain a dangling pointer;
  371|       |    ///
  372|       |    /// * both `self` and `base` must be created from the same [`RawTable`]
  373|       |    ///   (or [`RawTableInner`]).
  374|       |    ///
  375|       |    /// If `mem::size_of::<T>() == 0`, this function is always safe.
  376|       |    ///
  377|       |    /// [`Bucket`]: crate::raw::Bucket
  378|       |    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
  379|       |    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
  380|       |    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
  381|       |    /// [`RawTable`]: crate::raw::RawTable
  382|       |    /// [`RawTableInner`]: RawTableInner
  383|       |    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
  384|       |    #[inline]
  385|   507k|    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
  386|       |        // If mem::size_of::<T>() != 0 then return the index under which we used to store the
  387|       |        // `element` in the data part of the table (we start counting from "0", so
  388|       |        // that in the expression T[last], the "last" index actually is one less than the
  389|       |        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
  390|       |        // For example, for the 5th element in the table the calculation is performed like this:
  391|       |        //
  392|       |        //                        mem::size_of::<T>()
  393|       |        //                          |
  394|       |        //                          |         `self = from_base_index(base, 5)` returns a pointer
  395|       |        //                          |         that points here in the data part of the table
  396|       |        //                          |         (to the end of T5)
  397|       |        //                          |           |                    `base: NonNull<T>` must point here
  398|       |        //                          v           |                    (to the end of T0 or to the start of C0)
  399|       |        //                        /???\         v                      v
  400|       |        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
  401|       |        //                                      \__________  __________/
  402|       |        //                                                 \/
  403|       |        //                                     `bucket.to_base_index(base)` = 5
  404|       |        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
  405|       |        //
  406|       |        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
  407|   507k|        if T::IS_ZERO_SIZED {
  408|       |            // this cannot be UB
  409|      0|            self.ptr.as_ptr() as usize - 1
  410|       |        } else {
  411|   507k|            offset_from(base.as_ptr(), self.ptr.as_ptr())
  412|       |        }
  413|   507k|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::to_base_index (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::to_base_index
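The inverse relationship between `from_base_index` and `to_base_index` boils down to `base.sub(index)` and `offset_from(base, ptr)` round-tripping. A standalone sketch with a plain array (hashbrown's data part grows downwards from `base`, which is why a bucket stores a pointer one past its element; the pointer arithmetic below is the same):

    fn main() {
        let data = [10usize, 20, 30, 40, 50];
        unsafe {
            // One-past-the-end pointer, playing the role of the `base`
            // returned by RawTable::data_end().
            let base = data.as_ptr().add(data.len());
            for index in 0..data.len() {
                // from_base_index: step backwards from `base` by `index`
                // units of T.
                let ptr = base.sub(index);
                // to_base_index: recover the index as a pointer distance,
                // like `offset_from(base, ptr)` above.
                let recovered = base.offset_from(ptr) as usize;
                assert_eq!(recovered, index);
            }
        }
    }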
  414|       |
  415|       |    /// Acquires the underlying raw pointer `*mut T` to `data`.
  416|       |    ///
  417|       |    /// # Note
  418|       |    ///
  419|       |    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause the
  420|       |    /// destructor of `T` to be called (for example the [`<*mut T>::drop_in_place`] method),
  421|       |    /// because for properly dropping the data we also need to clear the `data` control
  422|       |    /// bytes. If we drop the data but do not clear the `data` control byte, it leads to a
  423|       |    /// double drop when [`RawTable`] goes out of scope.
  424|       |    ///
  425|       |    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
  426|       |    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
  427|       |    /// will not re-evaluate where the new value should go, meaning the value may become
  428|       |    /// "lost" if its location does not reflect its state.
  429|       |    ///
  430|       |    /// [`RawTable`]: crate::raw::RawTable
  431|       |    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
  432|       |    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
  433|       |    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
  434|       |    #[inline]
  435|  3.20M|    pub fn as_ptr(&self) -> *mut T {
  436|  3.20M|        if T::IS_ZERO_SIZED {
  437|       |            // Just return an arbitrary ZST pointer which is properly aligned;
  438|       |            // an invalid pointer is good enough for a ZST.
  439|      0|            invalid_mut(mem::align_of::<T>())
  440|       |        } else {
  441|  3.20M|            unsafe { self.ptr.as_ptr().sub(1) }
  442|       |        }
  443|  3.20M|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::as_ptr (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ptr
  444|       |
  445|       |    /// Acquires the underlying non-null pointer `*mut T` to `data`.
  446|       |    #[inline]
  447|      0|    fn as_non_null(&self) -> NonNull<T> {
  448|       |        // SAFETY: `self.ptr` is already a `NonNull`
  449|      0|        unsafe { NonNull::new_unchecked(self.as_ptr()) }
  450|      0|    }
  451|       |
  452|       |    /// Creates a new [`Bucket`] that is offset from `self` by the given
  453|       |    /// `offset`. The pointer calculation is performed by calculating the
  454|       |    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
  455|       |    /// This function is used for iterators.
  456|       |    ///
  457|       |    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
  458|       |    /// offset of `3 * size_of::<T>()` bytes.
  459|       |    ///
  460|       |    /// # Safety
  461|       |    ///
  462|       |    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
  463|       |    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the
  464|       |    /// safety rules of the [`NonNull::new_unchecked`] function.
  465|       |    ///
  466|       |    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
  467|       |    /// and the [`NonNull::new_unchecked`] function, as well as for the correct
  468|       |    /// logic of the work of this crate, the following rules are necessary and
  469|       |    /// sufficient:
  470|       |    ///
  471|       |    /// * the pointer contained in `self` must not be `dangling`;
  472|       |    ///
  473|       |    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
  474|       |    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
  475|       |    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
  476|       |    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
  477|       |    ///
  478|       |    /// If `mem::size_of::<T>() == 0`, then the only requirement is that
  479|       |    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
  480|       |    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
  481|       |    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
  482|       |    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
  483|       |    ///
  484|       |    /// [`Bucket`]: crate::raw::Bucket
  485|       |    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
  486|       |    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
  487|       |    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
  488|       |    /// [`RawTableInner::buckets`]: RawTableInner::buckets
  489|       |    #[inline]
  490|      0|    unsafe fn next_n(&self, offset: usize) -> Self {
  491|      0|        let ptr = if T::IS_ZERO_SIZED {
  492|       |            // invalid pointer is good enough for ZST
  493|      0|            invalid_mut(self.ptr.as_ptr() as usize + offset)
  494|       |        } else {
  495|      0|            self.ptr.as_ptr().sub(offset)
  496|       |        };
  497|      0|        Self {
  498|      0|            ptr: NonNull::new_unchecked(ptr),
  499|      0|        }
  500|      0|    }
  501|       |
  502|       |    /// Executes the destructor (if any) of the pointed-to `data`.
  503|       |    ///
  504|       |    /// # Safety
  505|       |    ///
  506|       |    /// See [`ptr::drop_in_place`] for safety concerns.
  507|       |    ///
  508|       |    /// You should use [`RawTable::erase`] instead of this function,
  509|       |    /// or be careful with calling this function directly, because for
  510|       |    /// properly dropping the data we also need to clear the `data` control bytes.
  511|       |    /// If we drop the data but do not erase the `data` control byte, it leads to a
  512|       |    /// double drop when [`RawTable`] goes out of scope.
  513|       |    ///
  514|       |    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
  515|       |    /// [`RawTable`]: crate::raw::RawTable
  516|       |    /// [`RawTable::erase`]: crate::raw::RawTable::erase
  517|       |    #[cfg_attr(feature = "inline-more", inline)]
  518|      0|    pub(crate) unsafe fn drop(&self) {
  519|      0|        self.as_ptr().drop_in_place();
  520|      0|    }
  521|       |
  522|       |    /// Reads the `value` from `self` without moving it. This leaves the
  523|       |    /// memory in `self` unchanged.
  524|       |    ///
  525|       |    /// # Safety
  526|       |    ///
  527|       |    /// See [`ptr::read`] for safety concerns.
  528|       |    ///
  529|       |    /// You should use [`RawTable::remove`] instead of this function,
  530|       |    /// or be careful with calling this function directly, because the compiler
  531|       |    /// calls the destructor when the read `value` goes out of scope. This
  532|       |    /// can cause a double drop when [`RawTable`] goes out of scope,
  533|       |    /// because the `data` control byte has not been erased.
  534|       |    ///
  535|       |    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
  536|       |    /// [`RawTable`]: crate::raw::RawTable
  537|       |    /// [`RawTable::remove`]: crate::raw::RawTable::remove
  538|       |    #[inline]
  539|   478k|    pub(crate) unsafe fn read(&self) -> T {
  540|   478k|        self.as_ptr().read()
  541|   478k|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::read (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::read
  542|       |
  543|       |    /// Overwrites a memory location with the given `value` without reading
  544|       |    /// or dropping the old value (like the [`ptr::write`] function).
  545|       |    ///
  546|       |    /// # Safety
  547|       |    ///
  548|       |    /// See [`ptr::write`] for safety concerns.
  549|       |    ///
  550|       |    /// # Note
  551|       |    ///
  552|       |    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
  553|       |    /// those for the old `T` value, as the map will not re-evaluate where the new
  554|       |    /// value should go, meaning the value may become "lost" if its location
  555|       |    /// does not reflect its state.
  556|       |    ///
  557|       |    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
  558|       |    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
  559|       |    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
  560|       |    #[inline]
  561|   478k|    pub(crate) unsafe fn write(&self, val: T) {
  562|   478k|        self.as_ptr().write(val);
  563|   478k|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::write (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::write
  564|       |
  565|       |    /// Returns a shared immutable reference to the `value`.
  566|       |    ///
  567|       |    /// # Safety
  568|       |    ///
  569|       |    /// See [`NonNull::as_ref`] for safety concerns.
  570|       |    ///
  571|       |    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
  572|       |    #[inline]
  573|  1.80M|    pub unsafe fn as_ref<'a>(&self) -> &'a T {
  574|  1.80M|        &*self.as_ptr()
  575|  1.80M|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::as_ref (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ref
  576|       |
  577|       |    /// Returns a unique mutable reference to the `value`.
  578|       |    ///
  579|       |    /// # Safety
  580|       |    ///
  581|       |    /// See [`NonNull::as_mut`] for safety concerns.
  582|       |    ///
  583|       |    /// # Note
  584|       |    ///
  585|       |    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
  586|       |    /// those for the old `T` value, as the map will not re-evaluate where the new
  587|       |    /// value should go, meaning the value may become "lost" if its location
  588|       |    /// does not reflect its state.
  589|       |    ///
  590|       |    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
  591|       |    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
  592|       |    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
  593|       |    #[inline]
  594|   441k|    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
  595|   441k|        &mut *self.as_ptr()
  596|   441k|    }
Executed instantiation: <hashbrown::raw::Bucket<usize>>::as_mut (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_mut
  597|       |}
  598|       |
  599|       |/// A raw hash table with an unsafe API.
  600|       |pub struct RawTable<T, A: Allocator = Global> {
  601|       |    table: RawTableInner,
  602|       |    alloc: A,
  603|       |    // Tell dropck that we own instances of T.
  604|       |    marker: PhantomData<T>,
  605|       |}
  606|       |
  607|       |/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
  608|       |/// of how many different key-value types are used.
  609|       |struct RawTableInner {
  610|       |    // Mask to get an index from a hash value. The value is one less than the
  611|       |    // number of buckets in the table.
  612|       |    bucket_mask: usize,
  613|       |
  614|       |    // [Padding], T_n, ..., T1, T0, C0, C1, ...
  615|       |    //                              ^ points here
  616|       |    ctrl: NonNull<u8>,
  617|       |
  618|       |    // Number of elements that can be inserted before we need to grow the table
  619|       |    growth_left: usize,
  620|       |
  621|       |    // Number of elements in the table, only really used by len()
  622|       |    items: usize,
  623|       |}
  624|       |
  625|       |impl<T> RawTable<T, Global> {
  626|       |    /// Creates a new empty hash table without allocating any memory.
  627|       |    ///
  628|       |    /// In effect this returns a table with exactly 1 bucket. However we can
  629|       |    /// leave the data pointer dangling since that bucket is never written to
  630|       |    /// due to our load factor forcing us to always have at least 1 free bucket.
  631|       |    #[inline]
  632|       |    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
  633|  12.7k|    pub const fn new() -> Self {
  634|  12.7k|        Self {
  635|  12.7k|            table: RawTableInner::NEW,
  636|  12.7k|            alloc: Global,
  637|  12.7k|            marker: PhantomData,
  638|  12.7k|        }
  639|  12.7k|    }
Executed instantiation: <hashbrown::raw::RawTable<usize>>::new (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::new
  640|       |
  641|       |    /// Allocates a new hash table with at least enough capacity for inserting
  642|       |    /// the given number of elements without reallocating.
  643|      0|    pub fn with_capacity(capacity: usize) -> Self {
  644|      0|        Self::with_capacity_in(capacity, Global)
  645|      0|    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::with_capacity
  646|       |}
  647|       |
  648|       |impl<T, A: Allocator> RawTable<T, A> {
  649|       |    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
  650|       |
  651|       |    /// Creates a new empty hash table without allocating any memory, using the
  652|       |    /// given allocator.
  653|       |    ///
  654|       |    /// In effect this returns a table with exactly 1 bucket. However we can
  655|       |    /// leave the data pointer dangling since that bucket is never written to
  656|       |    /// due to our load factor forcing us to always have at least 1 free bucket.
  657|       |    #[inline]
  658|       |    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
  659|      0|    pub const fn new_in(alloc: A) -> Self {
  660|      0|        Self {
  661|      0|            table: RawTableInner::NEW,
  662|      0|            alloc,
  663|      0|            marker: PhantomData,
  664|      0|        }
  665|      0|    }
  666|       |
  667|       |    /// Allocates a new hash table with the given number of buckets.
  668|       |    ///
  669|       |    /// The control bytes are left uninitialized.
  670|       |    #[cfg_attr(feature = "inline-more", inline)]
  671|      0|    unsafe fn new_uninitialized(
  672|      0|        alloc: A,
  673|      0|        buckets: usize,
  674|      0|        fallibility: Fallibility,
  675|      0|    ) -> Result<Self, TryReserveError> {
  676|      0|        debug_assert!(buckets.is_power_of_two());
  677|       |
  678|       |        Ok(Self {
  679|      0|            table: RawTableInner::new_uninitialized(
  680|      0|                &alloc,
  681|       |                Self::TABLE_LAYOUT,
  682|      0|                buckets,
  683|      0|                fallibility,
  684|      0|            )?,
  685|      0|            alloc,
  686|      0|            marker: PhantomData,
  687|       |        })
  688|      0|    }
  689|       |
  690|       |    /// Allocates a new hash table using the given allocator, with at least enough capacity for
  691|       |    /// inserting the given number of elements without reallocating.
  692|      0|    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
  693|      0|        Self {
  694|      0|            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
  695|      0|            alloc,
  696|      0|            marker: PhantomData,
  697|      0|        }
  698|      0|    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity_in
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::with_capacity_in
  699|       |
  700|       |    /// Returns a reference to the underlying allocator.
  701|       |    #[inline]
  702|      0|    pub fn allocator(&self) -> &A {
  703|      0|        &self.alloc
  704|      0|    }
  705|       |
  706|       |    /// Returns a pointer to one past the last `data` element in the table, as viewed from
  707|       |    /// the start point of the allocation.
  708|       |    ///
  709|       |    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
  710|       |    /// otherwise using it may result in [`undefined behavior`].
  711|       |    ///
  712|       |    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
  713|       |    #[inline]
  714|  2.98M|    pub fn data_end(&self) -> NonNull<T> {
  715|       |        //                        `self.table.ctrl.cast()` returns a pointer that
  716|       |        //                        points here (to the end of `T0`)
  717|       |        //                          ∨
  718|       |        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
  719|       |        //                           \________  ________/
  720|       |        //                                    \/
  721|       |        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
  722|       |        //
  723|       |        // where: T0...T_n  - our stored data;
  724|       |        //        CT0...CT_n - control bytes or metadata for `data`.
  725|       |        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
  726|       |        //                        with loading `Group` bytes from the heap works properly, even if the result
  727|       |        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
  728|       |        //                        the `RawTableInner::set_ctrl` function.
  729|       |        //
  730|       |        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
  731|       |        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
  732|  2.98M|        self.table.ctrl.cast()
  733|  2.98M|    }
Executed instantiation: <hashbrown::raw::RawTable<usize>>::data_end (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::data_end
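Why the `Group::WIDTH` additional control bytes (`CTa_0...CTa_m`, `m = Group::WIDTH - 1`) exist: a group load may start at any index up to `bucket_mask` and must see wrapped-around control bytes without branching. A plain-array simulation of that mirroring (illustrative only; hashbrown keeps the mirror in sync via `set_ctrl` and loads groups with SIMD):

    fn main() {
        const GROUP_WIDTH: usize = 4; // stand-in for Group::WIDTH
        let buckets = 8usize;
        // `buckets` control bytes plus GROUP_WIDTH trailing bytes that
        // mirror the first GROUP_WIDTH control bytes.
        let mut ctrl = vec![0u8; buckets + GROUP_WIDTH];
        for i in 0..buckets {
            ctrl[i] = i as u8 + 1; // a distinctive tag per bucket
        }
        for i in 0..GROUP_WIDTH {
            ctrl[buckets + i] = ctrl[i]; // the mirror
        }
        // A contiguous load of GROUP_WIDTH bytes starting at any bucket
        // index now equals the wrapped-around view, with no branching.
        for start in 0..buckets {
            for k in 0..GROUP_WIDTH {
                assert_eq!(ctrl[start + k], ctrl[(start + k) % buckets]);
            }
        }
    }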
  734|       |
  735|       |    /// Returns a pointer to the start of the data table.
  736|       |    #[inline]
  737|       |    #[cfg(feature = "nightly")]
  738|       |    pub unsafe fn data_start(&self) -> NonNull<T> {
  739|       |        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
  740|       |    }
  741|       |
  742|       |    /// Returns the total amount of memory allocated internally by the hash
  743|       |    /// table, in bytes.
  744|       |    ///
  745|       |    /// The returned number is informational only. It is intended to be
  746|       |    /// primarily used for memory profiling.
  747|       |    #[inline]
  748|      0|    pub fn allocation_size(&self) -> usize {
  749|       |        // SAFETY: We use the same `table_layout` that was used to allocate
  750|       |        // this table.
  751|      0|        unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) }
  752|      0|    }
  753|       |
  754|       |    /// Returns the index of a bucket from a `Bucket`.
  755|       |    #[inline]
  756|   507k|    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
  757|   507k|        bucket.to_base_index(self.data_end())
  758|   507k|    }
Executed instantiation: <hashbrown::raw::RawTable<usize>>::bucket_index (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::bucket_index
  759|       |
  760|       |    /// Returns a pointer to an element in the table.
  761|       |    ///
  762|       |    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
  763|       |    /// otherwise using it may result in [`undefined behavior`].
  764|       |    ///
  765|       |    /// # Safety
  766|       |    ///
  767|       |    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
  768|       |    /// following safety rules:
  769|       |    ///
  770|       |    /// * The table must already be allocated;
  771|       |    ///
  772|       |    /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
  773|       |    ///   function, i.e. `(index + 1) <= self.buckets()`.
  774|       |    ///
  775|       |    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
  776|       |    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
  777|       |    ///
  778|       |    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
  779|       |    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
  780|       |    /// `(index + 1) <= self.buckets()`.
  781|       |    ///
  782|       |    /// [`RawTable::buckets`]: RawTable::buckets
  783|       |    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
  784|       |    #[inline]
  785|  2.48M|    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
  786|       |        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
  787|       |        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
  788|       |        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
  789|       |        //
  790|       |        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
  791|       |        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
  792|       |        //                  |
  793|       |        //                  |               `base = self.data_end()` points here
  794|       |        //                  |               (to the start of CT0 or to the end of T0)
  795|       |        //                  v                 v
  796|       |        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
  797|       |        //                     ^                                              \__________  __________/
  798|       |        //        `table.bucket(3)` returns a pointer that points                        \/
  799|       |        //         here in the `data` part of the `RawTable` (to              additional control bytes
  800|       |        //         the end of T3)                                              `m = Group::WIDTH - 1`
  801|       |        //
  802|       |        // where: T0...T_n  - our stored data;
  803|       |        //        CT0...CT_n - control bytes or metadata for `data`;
  804|       |        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
  805|       |        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
  806|       |        //                        is equal to `self.table.bucket_mask`). See also the `RawTableInner::set_ctrl` function.
  807|       |        //
  808|       |        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
  809|       |        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
  810|  2.48M|        debug_assert_ne!(self.table.bucket_mask, 0);
  811|  2.48M|        debug_assert!(index < self.buckets());
  812|  2.48M|        Bucket::from_base_index(self.data_end(), index)
  813|  2.48M|    }
Executed instantiation: <hashbrown::raw::RawTable<usize>>::bucket (counts identical to the merged table above)
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::bucket
  814|       |
  815|       |    /// Erases an element from the table without dropping it.
  816|       |    #[cfg_attr(feature = "inline-more", inline)]
  817|      0|    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
  818|      0|        let index = self.bucket_index(item);
  819|      0|        self.table.erase(index);
  820|      0|    }
  821|       |
  822|       |    /// Erases an element from the table, dropping it in place.
  823|       |    #[cfg_attr(feature = "inline-more", inline)]
  824|       |    #[allow(clippy::needless_pass_by_value)]
  825|      0|    pub unsafe fn erase(&mut self, item: Bucket<T>) {
  826|       |        // Erase the element from the table first since drop might panic.
  827|      0|        self.erase_no_drop(&item);
  828|      0|        item.drop();
  829|      0|    }
  830|       |
  831|       |    /// Removes an element from the table, returning it.
  832|       |    ///
  833|       |    /// This also returns an index to the newly free bucket.
  834|       |    #[cfg_attr(feature = "inline-more", inline)]
  835|       |    #[allow(clippy::needless_pass_by_value)]
  836|      0|    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, usize) {
  837|      0|        self.erase_no_drop(&item);
  838|      0|        (item.read(), self.bucket_index(&item))
  839|      0|    }
  840|       |
  841|       |    /// Removes an element from the table, returning it.
  842|       |    ///
  843|       |    /// This also returns an index to the newly free bucket
  844|       |    /// and the former `Tag` for that bucket.
  845|       |    #[cfg_attr(feature = "inline-more", inline)]
  846|       |    #[allow(clippy::needless_pass_by_value)]
  847|   478k|    pub(crate) unsafe fn remove_tagged(&mut self, item: Bucket<T>) -> (T, usize, Tag) {
  848|   478k|        let index = self.bucket_index(&item);
  849|   478k|        let tag = *self.table.ctrl(index);
  850|   478k|        self.table.erase(index);
  851|   478k|        (item.read(), index, tag)
  852|   478k|    }
<hashbrown::raw::RawTable<usize>>::remove_tagged
Line
Count
Source
847
478k
    pub(crate) unsafe fn remove_tagged(&mut self, item: Bucket<T>) -> (T, usize, Tag) {
848
478k
        let index = self.bucket_index(&item);
849
478k
        let tag = *self.table.ctrl(index);
850
478k
        self.table.erase(index);
851
478k
        (item.read(), index, tag)
852
478k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::remove_tagged
853
854
    /// Finds and removes an element from the table, returning it.
855
    #[cfg_attr(feature = "inline-more", inline)]
856
0
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
857
        // Avoid `Option::map` because it bloats LLVM IR.
858
0
        match self.find(hash, eq) {
859
0
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
860
0
            None => None,
861
        }
862
0
    }
863
864
    /// Marks all table buckets as empty without dropping their contents.
865
    #[cfg_attr(feature = "inline-more", inline)]
866
0
    pub fn clear_no_drop(&mut self) {
867
0
        self.table.clear_no_drop();
868
0
    }
869
870
    /// Removes all elements from the table without freeing the backing memory.
871
    #[cfg_attr(feature = "inline-more", inline)]
872
0
    pub fn clear(&mut self) {
873
0
        if self.is_empty() {
874
            // Special case empty table to avoid surprising O(capacity) time.
875
0
            return;
876
0
        }
877
        // Ensure that the table is reset even if one of the drops panics
878
0
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
879
0
        unsafe {
880
0
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
881
0
            // even in case of panic during the dropping of the elements so
882
0
            // that there will be no double drop of the elements.
883
0
            self_.table.drop_elements::<T>();
884
0
        }
885
0
    }
886
887
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
888
    #[cfg_attr(feature = "inline-more", inline)]
889
0
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
890
        // Calculate the minimal number of elements that we need to reserve
891
        // space for.
892
0
        let min_size = usize::max(self.table.items, min_size);
893
0
        if min_size == 0 {
894
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
895
0
            unsafe {
896
0
                // SAFETY:
897
0
                // 1. We call the function only once;
898
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
899
0
                //    and [`TableLayout`] that were used to allocate this table.
900
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
901
0
                //    because we have replaced the inner table with a new one.
902
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
903
0
            }
904
0
            return;
905
0
        }
906
907
        // Calculate the number of buckets that we need for this number of
908
        // elements. If the calculation overflows then the requested bucket
909
        // count must be larger than what we have right now, and nothing needs to be
910
        // done.
911
0
        let min_buckets = match capacity_to_buckets(min_size, Self::TABLE_LAYOUT) {
912
0
            Some(buckets) => buckets,
913
0
            None => return,
914
        };
915
916
        // If we have more buckets than we need, shrink the table.
917
0
        if min_buckets < self.buckets() {
918
            // Fast path if the table is empty
919
0
            if self.table.items == 0 {
920
0
                let new_inner =
921
0
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
922
0
                let mut old_inner = mem::replace(&mut self.table, new_inner);
923
0
                unsafe {
924
0
                    // SAFETY:
925
0
                    // 1. We call the function only once;
926
0
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
927
0
                    //    and [`TableLayout`] that were used to allocate this table.
928
0
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
929
0
                    //    because we have replaced the inner table with a new one.
930
0
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
931
0
                }
932
            } else {
933
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
934
                unsafe {
935
                    // SAFETY:
936
                    // 1. We know for sure that `min_size >= self.table.items`.
937
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
938
                    //    we will never expose RawTable::new_uninitialized in a public API.
939
0
                    if self
940
0
                        .resize(min_size, hasher, Fallibility::Infallible)
941
0
                        .is_err()
942
                    {
943
                        // SAFETY: The result of calling the `resize` function cannot be an error
944
                        // because `fallibility == Fallibility::Infallible`.
945
0
                        hint::unreachable_unchecked()
946
0
                    }
947
                }
948
            }
949
0
        }
950
0
    }
951
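The same shrinking behavior is reachable through the public wrapper types; a small usage sketch with `hashbrown::HashMap`, which is built on this raw table:

    use hashbrown::HashMap;

    fn main() {
        let mut map: HashMap<u32, u32> = HashMap::with_capacity(100);
        map.insert(1, 1);
        // Shrinks toward max(len, min_capacity), mirroring `shrink_to` above.
        map.shrink_to(10);
        assert!(map.capacity() >= map.len());
    }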
952
    /// Ensures that at least `additional` items can be inserted into the table
953
    /// without reallocation.
954
    #[cfg_attr(feature = "inline-more", inline)]
955
425k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
956
425k
        if unlikely(additional > self.table.growth_left) {
957
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
958
            unsafe {
959
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
960
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
961
27.0k
                if self
962
27.0k
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
963
27.0k
                    .is_err()
964
                {
965
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
966
0
                    hint::unreachable_unchecked()
967
27.0k
                }
968
            }
969
397k
        }
970
425k
    }
<hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Line
Count
Source
955
425k
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
956
425k
        if unlikely(additional > self.table.growth_left) {
957
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
958
            unsafe {
959
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
960
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
961
27.0k
                if self
962
27.0k
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
963
27.0k
                    .is_err()
964
                {
965
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
966
0
                    hint::unreachable_unchecked()
967
27.0k
                }
968
            }
969
397k
        }
970
425k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve::<_>
971
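Reserving up front keeps `additional > self.table.growth_left` false for the whole workload, so the cold `reserve_rehash` path above never runs. A usage sketch via the public map API:

    use hashbrown::HashMap;

    fn main() {
        let mut map: HashMap<u64, u64> = HashMap::new();
        map.reserve(1_000); // one allocation and rehash, done eagerly
        for i in 0..1_000 {
            map.insert(i, i); // stays on the fast path
        }
        assert!(map.capacity() >= 1_000);
    }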
972
    /// Tries to ensure that at least `additional` items can be inserted into
973
    /// the table without reallocation.
974
    #[cfg_attr(feature = "inline-more", inline)]
975
0
    pub fn try_reserve(
976
0
        &mut self,
977
0
        additional: usize,
978
0
        hasher: impl Fn(&T) -> u64,
979
0
    ) -> Result<(), TryReserveError> {
980
0
        if additional > self.table.growth_left {
981
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
982
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
983
0
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
984
        } else {
985
0
            Ok(())
986
        }
987
0
    }
988
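The fallible variant reports allocation failure to the caller instead of aborting. A short sketch through the public wrapper; the `Result` carries the same `TryReserveError` used by the raw API:

    use hashbrown::{HashMap, TryReserveError};

    fn grow_checked(map: &mut HashMap<u64, u64>, n: usize) -> Result<(), TryReserveError> {
        // Internally this takes the Fallibility::Fallible route above.
        map.try_reserve(n)
    }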
989
    /// Out-of-line slow path for `reserve` and `try_reserve`.
990
    ///
991
    /// # Safety
992
    ///
993
    /// The [`RawTableInner`] must have properly initialized control bytes,
994
    /// otherwise calling this function results in [`undefined behavior`]
995
    ///
996
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
997
    #[cold]
998
    #[inline(never)]
999
27.0k
    unsafe fn reserve_rehash(
1000
27.0k
        &mut self,
1001
27.0k
        additional: usize,
1002
27.0k
        hasher: impl Fn(&T) -> u64,
1003
27.0k
        fallibility: Fallibility,
1004
27.0k
    ) -> Result<(), TryReserveError> {
1005
        unsafe {
1006
            // SAFETY:
1007
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1008
            //    [`TableLayout`] that were used to allocate this table.
1009
            // 2. The `drop` function is the actual drop function of the elements stored in
1010
            //    the table.
1011
            // 3. The caller ensures that the control bytes of the `RawTableInner`
1012
            //    are already initialized.
1013
27.0k
            self.table.reserve_rehash_inner(
1014
27.0k
                &self.alloc,
1015
27.0k
                additional,
1016
725k
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
<hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>::{closure#0}
Line
Count
Source
1016
725k
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>::{closure#0}
1017
27.0k
                fallibility,
1018
                Self::TABLE_LAYOUT,
1019
27.0k
                if T::NEEDS_DROP {
1020
0
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
1021
                } else {
1022
27.0k
                    None
1023
                },
1024
            )
1025
        }
1026
27.0k
    }
<hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Line
Count
Source
999
27.0k
    unsafe fn reserve_rehash(
1000
27.0k
        &mut self,
1001
27.0k
        additional: usize,
1002
27.0k
        hasher: impl Fn(&T) -> u64,
1003
27.0k
        fallibility: Fallibility,
1004
27.0k
    ) -> Result<(), TryReserveError> {
1005
        unsafe {
1006
            // SAFETY:
1007
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1008
            //    [`TableLayout`] that were used to allocate this table.
1009
            // 2. The `drop` function is the actual drop function of the elements stored in
1010
            //    the table.
1011
            // 3. The caller ensures that the control bytes of the `RawTableInner`
1012
            //    are already initialized.
1013
27.0k
            self.table.reserve_rehash_inner(
1014
27.0k
                &self.alloc,
1015
27.0k
                additional,
1016
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1017
27.0k
                fallibility,
1018
                Self::TABLE_LAYOUT,
1019
27.0k
                if T::NEEDS_DROP {
1020
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
1021
                } else {
1022
27.0k
                    None
1023
                },
1024
            )
1025
        }
1026
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>
1027
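`reserve_rehash` is `#[cold]` and `#[inline(never)]` so the inlined `reserve` check stays small. A generic sketch of that out-of-line slow-path idiom (names here are illustrative, not part of hashbrown):

    #[cold]
    #[inline(never)]
    fn grow_slow(buf: &mut Vec<u8>, additional: usize) {
        buf.reserve(additional);
    }

    #[inline]
    fn push_fast(buf: &mut Vec<u8>, byte: u8) {
        if buf.len() == buf.capacity() {
            grow_slow(buf, 1); // rare path kept out of the hot function body
        }
        buf.push(byte);
    }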
1028
    /// Allocates a new table of a different size and moves the contents of the
1029
    /// current table into it.
1030
    ///
1031
    /// # Safety
1032
    ///
1033
    /// The [`RawTableInner`] must have properly initialized control bytes,
1034
    /// otherwise calling this function results in [`undefined behavior`]
1035
    ///
1036
    /// The caller of this function must ensure that `capacity >= self.table.items`
1037
    /// otherwise:
1038
    ///
1039
    /// * If `self.table.items != 0`, calling this function with `capacity`
1040
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
1041
    ///
1042
    /// * If `self.table.items > capacity_to_buckets(capacity, Self::TABLE_LAYOUT)`
1043
    ///   calling this function will never return (it will loop infinitely).
1044
    ///
1045
    /// See [`RawTableInner::find_insert_index`] for more information.
1046
    ///
1047
    /// [`RawTableInner::find_insert_index`]: RawTableInner::find_insert_index
1048
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1049
0
    unsafe fn resize(
1050
0
        &mut self,
1051
0
        capacity: usize,
1052
0
        hasher: impl Fn(&T) -> u64,
1053
0
        fallibility: Fallibility,
1054
0
    ) -> Result<(), TryReserveError> {
1055
        // SAFETY:
1056
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1057
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1058
        //    [`TableLayout`] that were used to allocate this table.
1059
        // 3. The caller ensures that the control bytes of the `RawTableInner`
1060
        //    are already initialized.
1061
0
        self.table.resize_inner(
1062
0
            &self.alloc,
1063
0
            capacity,
1064
0
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1065
0
            fallibility,
1066
            Self::TABLE_LAYOUT,
1067
        )
1068
0
    }
1069
1070
    /// Inserts a new element into the table, and returns its raw bucket.
1071
    ///
1072
    /// This does not check if the given element already exists in the table.
1073
    #[cfg_attr(feature = "inline-more", inline)]
1074
57.0k
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1075
        unsafe {
1076
            // SAFETY:
1077
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1078
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1079
            //
1080
            // 2. We reserve additional space (if necessary) right after calling this function.
1081
57.0k
            let mut index = self.table.find_insert_index(hash);
1082
1083
            // We can avoid growing the table once we have reached our load factor if we are replacing
1084
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1085
            //
1086
            // SAFETY: The function is guaranteed to return an index in the range `0..=self.buckets()`.
1087
57.0k
            let old_ctrl = *self.table.ctrl(index);
1088
57.0k
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
1089
3.35k
                self.reserve(1, hasher);
1090
3.35k
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1091
3.35k
                // initialized and that there is extra space in the table.
1092
3.35k
                index = self.table.find_insert_index(hash);
1093
53.6k
            }
1094
1095
57.0k
            self.insert_at_index(hash, index, value)
1096
        }
1097
57.0k
    }
<hashbrown::raw::RawTable<usize>>::insert::<indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Line
Count
Source
1074
57.0k
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1075
        unsafe {
1076
            // SAFETY:
1077
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1078
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1079
            //
1080
            // 2. We reserve additional space (if necessary) right after calling this function.
1081
57.0k
            let mut index = self.table.find_insert_index(hash);
1082
1083
            // We can avoid growing the table once we have reached our load factor if we are replacing
1084
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1085
            //
1086
            // SAFETY: The function is guaranteed to return an index in the range `0..=self.buckets()`.
1087
57.0k
            let old_ctrl = *self.table.ctrl(index);
1088
57.0k
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
1089
3.35k
                self.reserve(1, hasher);
1090
3.35k
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1091
3.35k
                // initialized and that there is extra space in the table.
1092
3.35k
                index = self.table.find_insert_index(hash);
1093
53.6k
            }
1094
1095
57.0k
            self.insert_at_index(hash, index, value)
1096
        }
1097
57.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert::<_>
1098
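The tombstone check in `insert` can be read in isolation: a grow is forced only when the load factor is exhausted and the chosen slot is genuinely EMPTY, since overwriting a DELETED slot leaves the count of EMPTY slots unchanged. A minimal restatement, with `slot_is_empty` standing in for `old_ctrl.special_is_empty()`:

    fn must_grow_before_insert(growth_left: usize, slot_is_empty: bool) -> bool {
        growth_left == 0 && slot_is_empty
    }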
1099
    /// Inserts a new element into the table, and returns a mutable reference to it.
1100
    ///
1101
    /// This does not check if the given element already exists in the table.
1102
    #[cfg_attr(feature = "inline-more", inline)]
1103
0
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1104
0
        unsafe { self.insert(hash, value, hasher).as_mut() }
1105
0
    }
1106
1107
    /// Inserts a new element into the table, without growing the table.
1108
    ///
1109
    /// There must be enough space in the table to insert the new element.
1110
    ///
1111
    /// This does not check if the given element already exists in the table.
1112
    #[cfg_attr(feature = "inline-more", inline)]
1113
    #[cfg(feature = "rustc-internal-api")]
1114
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1115
        let (index, old_ctrl) = self.table.prepare_insert_index(hash);
1116
        let bucket = self.table.bucket(index);
1117
1118
        // If we are replacing a DELETED entry then we don't need to update
1119
        // the load counter.
1120
        self.table.growth_left -= old_ctrl.special_is_empty() as usize;
1121
1122
        bucket.write(value);
1123
        self.table.items += 1;
1124
        bucket
1125
    }
1126
1127
    /// Temporarily removes a bucket, applying the given function to the removed
1128
    /// element and optionally putting the returned value back in the same bucket.
1129
    ///
1130
    /// Returns `true` if the bucket still contains an element.
1131
    ///
1132
    /// This does not check if the given bucket is actually occupied.
1133
    #[cfg_attr(feature = "inline-more", inline)]
1134
0
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1135
0
    where
1136
0
        F: FnOnce(T) -> Option<T>,
1137
    {
1138
0
        let index = self.bucket_index(&bucket);
1139
0
        let old_ctrl = *self.table.ctrl(index);
1140
0
        debug_assert!(self.is_bucket_full(index));
1141
0
        let old_growth_left = self.table.growth_left;
1142
0
        let item = self.remove(bucket).0;
1143
0
        if let Some(new_item) = f(item) {
1144
0
            self.table.growth_left = old_growth_left;
1145
0
            self.table.set_ctrl(index, old_ctrl);
1146
0
            self.table.items += 1;
1147
0
            self.bucket(index).write(new_item);
1148
0
            true
1149
        } else {
1150
0
            false
1151
        }
1152
0
    }
1153
1154
    /// Searches for an element in the table. If the element is not found,
1155
    /// returns `Err` with the position of a slot where an element with the
1156
    /// same hash could be inserted.
1157
    ///
1158
    /// This function may resize the table if additional space is required for
1159
    /// inserting an element.
1160
    #[inline]
1161
421k
    pub fn find_or_find_insert_index(
1162
421k
        &mut self,
1163
421k
        hash: u64,
1164
421k
        mut eq: impl FnMut(&T) -> bool,
1165
421k
        hasher: impl Fn(&T) -> u64,
1166
421k
    ) -> Result<Bucket<T>, usize> {
1167
421k
        self.reserve(1, hasher);
1168
1169
        unsafe {
1170
            // SAFETY:
1171
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1172
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1173
            //    never expose `RawTable::new_uninitialized` in a public API.
1174
            // 3. The `find_or_find_insert_index_inner` function returns the `index` of only the full bucket,
1175
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1176
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1177
421k
            match self
1178
421k
                .table
1179
421k
                .find_or_find_insert_index_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
<hashbrown::raw::RawTable<usize>>::find_or_find_insert_index::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}, indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>::{closure#0}
Line
Count
Source
1179
34.0k
                .find_or_find_insert_index_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_index::<_, _>::{closure#0}
1180
            {
1181
                // SAFETY: See explanation above.
1182
0
                Ok(index) => Ok(self.bucket(index)),
1183
421k
                Err(index) => Err(index),
1184
            }
1185
        }
1186
421k
    }
<hashbrown::raw::RawTable<usize>>::find_or_find_insert_index::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}, indexmap::map::core::get_hash<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex>::{closure#0}>
Line
Count
Source
1161
421k
    pub fn find_or_find_insert_index(
1162
421k
        &mut self,
1163
421k
        hash: u64,
1164
421k
        mut eq: impl FnMut(&T) -> bool,
1165
421k
        hasher: impl Fn(&T) -> u64,
1166
421k
    ) -> Result<Bucket<T>, usize> {
1167
421k
        self.reserve(1, hasher);
1168
1169
        unsafe {
1170
            // SAFETY:
1171
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1172
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1173
            //    never expose `RawTable::new_uninitialized` in a public API.
1174
            // 3. The `find_or_find_insert_index_inner` function returns the `index` of only the full bucket,
1175
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1176
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1177
421k
            match self
1178
421k
                .table
1179
421k
                .find_or_find_insert_index_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1180
            {
1181
                // SAFETY: See explanation above.
1182
0
                Ok(index) => Ok(self.bucket(index)),
1183
421k
                Err(index) => Err(index),
1184
            }
1185
        }
1186
421k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_index::<_, _>
1187
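This find-or-find-slot primitive is what entry-style APIs reduce to: one probe sequence serves both the hit and the miss. A usage sketch through the public map wrapper:

    use hashbrown::HashMap;

    fn main() {
        let mut counts: HashMap<&str, u32> = HashMap::new();
        for word in ["a", "b", "a"] {
            // Conceptually a single find-or-insert-slot probe per iteration.
            *counts.entry(word).or_insert(0) += 1;
        }
        assert_eq!(counts["a"], 2);
    }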
1188
    /// Inserts a new element into the table at the given index with the given hash,
1189
    /// and returns its raw bucket.
1190
    ///
1191
    /// # Safety
1192
    ///
1193
    /// `index` must point to a slot previously returned by
1194
    /// `find_or_find_insert_index`, and no mutation of the table must have
1195
    /// occurred since that call.
1196
    #[inline]
1197
57.0k
    pub unsafe fn insert_at_index(&mut self, hash: u64, index: usize, value: T) -> Bucket<T> {
1198
57.0k
        self.insert_tagged_at_index(Tag::full(hash), index, value)
1199
57.0k
    }
<hashbrown::raw::RawTable<usize>>::insert_at_index
Line
Count
Source
1197
57.0k
    pub unsafe fn insert_at_index(&mut self, hash: u64, index: usize, value: T) -> Bucket<T> {
1198
57.0k
        self.insert_tagged_at_index(Tag::full(hash), index, value)
1199
57.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert_at_index
1200
1201
    /// Inserts a new element into the table at the given index with the given tag,
1202
    /// and returns its raw bucket.
1203
    ///
1204
    /// # Safety
1205
    ///
1206
    /// `index` must point to a slot previously returned by
1207
    /// `find_or_find_insert_index`, and no mutation of the table must have
1208
    /// occurred since that call.
1209
    #[inline]
1210
478k
    pub(crate) unsafe fn insert_tagged_at_index(
1211
478k
        &mut self,
1212
478k
        tag: Tag,
1213
478k
        index: usize,
1214
478k
        value: T,
1215
478k
    ) -> Bucket<T> {
1216
478k
        let old_ctrl = *self.table.ctrl(index);
1217
478k
        self.table.record_item_insert_at(index, old_ctrl, tag);
1218
1219
478k
        let bucket = self.bucket(index);
1220
478k
        bucket.write(value);
1221
478k
        bucket
1222
478k
    }
<hashbrown::raw::RawTable<usize>>::insert_tagged_at_index
Line
Count
Source
1210
478k
    pub(crate) unsafe fn insert_tagged_at_index(
1211
478k
        &mut self,
1212
478k
        tag: Tag,
1213
478k
        index: usize,
1214
478k
        value: T,
1215
478k
    ) -> Bucket<T> {
1216
478k
        let old_ctrl = *self.table.ctrl(index);
1217
478k
        self.table.record_item_insert_at(index, old_ctrl, tag);
1218
1219
478k
        let bucket = self.bucket(index);
1220
478k
        bucket.write(value);
1221
478k
        bucket
1222
478k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert_tagged_at_index
1223
1224
    /// Searches for an element in the table.
1225
    #[inline]
1226
1.15M
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1227
        unsafe {
1228
            // SAFETY:
1229
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1230
            //    will never expose `RawTable::new_uninitialized` in a public API.
1231
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1232
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1233
            //    is safe.
1234
1.15M
            let result = self
1235
1.15M
                .table
1236
1.15M
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>::{closure#0}
Line
Count
Source
1236
542k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>::{closure#0}
Line
Count
Source
1236
6.47k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>::{closure#0}
Line
Count
Source
1236
444k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>::{closure#0}
1237
1238
            // Avoid `Option::map` because it bloats LLVM IR.
1239
1.15M
            match result {
1240
                // SAFETY: See explanation above.
1241
974k
                Some(index) => Some(self.bucket(index)),
1242
185k
                None => None,
1243
            }
1244
        }
1245
1.15M
    }
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>
Line
Count
Source
1226
710k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1227
        unsafe {
1228
            // SAFETY:
1229
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1230
            //    will never expose `RawTable::new_uninitialized` in a public API.
1231
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1232
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1233
            //    is safe.
1234
710k
            let result = self
1235
710k
                .table
1236
710k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1237
1238
            // Avoid `Option::map` because it bloats LLVM IR.
1239
710k
            match result {
1240
                // SAFETY: See explanation above.
1241
525k
                Some(index) => Some(self.bucket(index)),
1242
185k
                None => None,
1243
            }
1244
        }
1245
710k
    }
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::erase_index::{closure#0}>
Line
Count
Source
1226
6.47k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1227
        unsafe {
1228
            // SAFETY:
1229
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1230
            //    will never expose `RawTable::new_uninitialized` in a public API.
1231
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1232
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1233
            //    is safe.
1234
6.47k
            let result = self
1235
6.47k
                .table
1236
6.47k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1237
1238
            // Avoid `Option::map` because it bloats LLVM IR.
1239
6.47k
            match result {
1240
                // SAFETY: See explanation above.
1241
6.47k
                Some(index) => Some(self.bucket(index)),
1242
0
                None => None,
1243
            }
1244
        }
1245
6.47k
    }
<hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::update_index::{closure#0}>
Line
Count
Source
1226
441k
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1227
        unsafe {
1228
            // SAFETY:
1229
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1230
            //    will never expose `RawTable::new_uninitialized` in a public API.
1231
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1232
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1233
            //    is safe.
1234
441k
            let result = self
1235
441k
                .table
1236
441k
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1237
1238
            // Avoid `Option::map` because it bloats LLVM IR.
1239
441k
            match result {
1240
                // SAFETY: See explanation above.
1241
441k
                Some(index) => Some(self.bucket(index)),
1242
0
                None => None,
1243
            }
1244
        }
1245
441k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>
1246
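A usage sketch of `find` through the public `HashTable` wrapper, where hashes are supplied explicitly just as in the raw API (assumes the `HashTable` type available in recent hashbrown releases):

    use hashbrown::HashTable;
    use std::collections::hash_map::RandomState;
    use std::hash::BuildHasher;

    fn main() {
        let s = RandomState::new();
        let mut t: HashTable<u64> = HashTable::new();
        let h = s.hash_one(42u64);
        t.insert_unique(h, 42, |v| s.hash_one(*v));
        // `eq` runs only on candidate buckets whose control tag matches.
        assert_eq!(t.find(h, |v| *v == 42), Some(&42));
    }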
1247
    /// Gets a reference to an element in the table.
1248
    #[inline]
1249
71.0k
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1250
        // Avoid `Option::map` because it bloats LLVM IR.
1251
71.0k
        match self.find(hash, eq) {
1252
24.3k
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1253
46.6k
            None => None,
1254
        }
1255
71.0k
    }
<hashbrown::raw::RawTable<usize>>::get::<indexmap::map::core::equivalent<h2::frame::stream_id::StreamId, h2::proto::streams::store::SlabIndex, h2::frame::stream_id::StreamId>::{closure#0}>
Line
Count
Source
1249
71.0k
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1250
        // Avoid `Option::map` because it bloats LLVM IR.
1251
71.0k
        match self.find(hash, eq) {
1252
24.3k
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1253
46.6k
            None => None,
1254
        }
1255
71.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::get::<_>
1256
1257
    /// Gets a mutable reference to an element in the table.
1258
    #[inline]
1259
441k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1260
        // Avoid `Option::map` because it bloats LLVM IR.
1261
441k
        match self.find(hash, eq) {
1262
441k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1263
0
            None => None,
1264
        }
1265
441k
    }
<hashbrown::raw::RawTable<usize>>::get_mut::<indexmap::map::core::update_index::{closure#0}>
Line
Count
Source
1259
441k
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1260
        // Avoid `Option::map` because it bloats LLVM IR.
1261
441k
        match self.find(hash, eq) {
1262
441k
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1263
0
            None => None,
1264
        }
1265
441k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::get_mut::<_>
1266
1267
    /// Gets a reference to an element in the table at the given bucket index.
1268
    #[inline]
1269
0
    pub fn get_bucket(&self, index: usize) -> Option<&T> {
1270
        unsafe {
1271
0
            if index < self.buckets() && self.is_bucket_full(index) {
1272
0
                Some(self.bucket(index).as_ref())
1273
            } else {
1274
0
                None
1275
            }
1276
        }
1277
0
    }
1278
1279
    /// Gets a mutable reference to an element in the table at the given bucket index.
1280
    #[inline]
1281
0
    pub fn get_bucket_mut(&mut self, index: usize) -> Option<&mut T> {
1282
        unsafe {
1283
0
            if index < self.buckets() && self.is_bucket_full(index) {
1284
0
                Some(self.bucket(index).as_mut())
1285
            } else {
1286
0
                None
1287
            }
1288
        }
1289
0
    }
1290
1291
    /// Returns a pointer to an element in the table, but only after verifying that
1292
    /// the index is in-bounds and the bucket is occupied.
1293
    #[inline]
1294
0
    pub fn checked_bucket(&self, index: usize) -> Option<Bucket<T>> {
1295
        unsafe {
1296
0
            if index < self.buckets() && self.is_bucket_full(index) {
1297
0
                Some(self.bucket(index))
1298
            } else {
1299
0
                None
1300
            }
1301
        }
1302
0
    }
1303
1304
    /// Attempts to get mutable references to `N` entries in the table at once.
1305
    ///
1306
    /// Returns an array of length `N` with the results of each query.
1307
    ///
1308
    /// At most one mutable reference will be returned to any entry. This method panics if any
1309
    /// of the hashes are duplicates. `None` is returned for any hash that is not found.
1310
    ///
1311
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1312
    /// the `i`th key to be looked up.
1313
0
    pub fn get_disjoint_mut<const N: usize>(
1314
0
        &mut self,
1315
0
        hashes: [u64; N],
1316
0
        eq: impl FnMut(usize, &T) -> bool,
1317
0
    ) -> [Option<&'_ mut T>; N] {
1318
        unsafe {
1319
0
            let ptrs = self.get_disjoint_mut_pointers(hashes, eq);
1320
1321
0
            for (i, cur) in ptrs.iter().enumerate() {
1322
0
                if cur.is_some() && ptrs[..i].contains(cur) {
1323
0
                    panic!("duplicate keys found");
1324
0
                }
1325
            }
1326
            // All buckets are distinct from all previous buckets so we're clear to return the result
1327
            // of the lookup.
1328
1329
0
            ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1330
        }
1331
0
    }
1332
1333
0
    pub unsafe fn get_disjoint_unchecked_mut<const N: usize>(
1334
0
        &mut self,
1335
0
        hashes: [u64; N],
1336
0
        eq: impl FnMut(usize, &T) -> bool,
1337
0
    ) -> [Option<&'_ mut T>; N] {
1338
0
        let ptrs = self.get_disjoint_mut_pointers(hashes, eq);
1339
0
        ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1340
0
    }
1341
1342
0
    unsafe fn get_disjoint_mut_pointers<const N: usize>(
1343
0
        &mut self,
1344
0
        hashes: [u64; N],
1345
0
        mut eq: impl FnMut(usize, &T) -> bool,
1346
0
    ) -> [Option<NonNull<T>>; N] {
1347
0
        array::from_fn(|i| {
1348
0
            self.find(hashes[i], |k| eq(i, k))
1349
0
                .map(|cur| cur.as_non_null())
1350
0
        })
1351
0
    }
1352
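The same contract, at most one mutable reference per entry and a panic on duplicate keys, is exposed by std's `HashMap::get_disjoint_mut` (stabilized in Rust 1.86), which makes a convenient usage sketch:

    use std::collections::HashMap;

    fn main() {
        let mut m = HashMap::from([("a", 1), ("b", 2)]);
        // Passing the same key twice would panic, mirroring the
        // "duplicate keys found" check above.
        let [a, b] = m.get_disjoint_mut([&"a", &"b"]);
        *a.unwrap() += 10;
        *b.unwrap() += 10;
        assert_eq!(m["a"], 11);
    }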
1353
    /// Returns the number of elements the map can hold without reallocating.
1354
    ///
1355
    /// This number is a lower bound; the table might be able to hold
1356
    /// more, but is guaranteed to be able to hold at least this many.
1357
    #[inline]
1358
27.0k
    pub fn capacity(&self) -> usize {
1359
27.0k
        self.table.items + self.table.growth_left
1360
27.0k
    }
<hashbrown::raw::RawTable<usize>>::capacity
Line
Count
Source
1358
27.0k
    pub fn capacity(&self) -> usize {
1359
27.0k
        self.table.items + self.table.growth_left
1360
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::capacity
1361
1362
    /// Returns the number of elements in the table.
1363
    #[inline]
1364
1.78M
    pub fn len(&self) -> usize {
1365
1.78M
        self.table.items
1366
1.78M
    }
<hashbrown::raw::RawTable<usize>>::len
Line
Count
Source
1364
1.78M
    pub fn len(&self) -> usize {
1365
1.78M
        self.table.items
1366
1.78M
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::len
1367
1368
    /// Returns `true` if the table contains no elements.
1369
    #[inline]
1370
0
    pub fn is_empty(&self) -> bool {
1371
0
        self.len() == 0
1372
0
    }
1373
1374
    /// Returns the number of buckets in the table.
1375
    #[inline]
1376
0
    pub fn buckets(&self) -> usize {
1377
0
        self.table.bucket_mask + 1
1378
0
    }
1379
1380
    /// Checks whether the bucket at `index` is full.
1381
    ///
1382
    /// # Safety
1383
    ///
1384
    /// The caller must ensure `index` is less than the number of buckets.
1385
    #[inline]
1386
0
    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1387
0
        self.table.is_bucket_full(index)
1388
0
    }
1389
1390
    /// Returns an iterator over every element in the table. It is up to
1391
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1392
    /// Because we cannot make the `next` method unsafe on the `RawIter`
1393
    /// struct, we have to make the `iter` method unsafe.
1394
    #[inline]
1395
0
    pub unsafe fn iter(&self) -> RawIter<T> {
1396
        // SAFETY:
1397
        // 1. The caller must uphold the safety contract for `iter` method.
1398
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1399
        //    we will never expose RawTable::new_uninitialized in a public API.
1400
0
        self.table.iter()
1401
0
    }
1402
1403
    /// Returns an iterator over occupied buckets that could match a given hash.
1404
    ///
1405
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1406
    /// return items that have a hash value different than the one provided. You
1407
    /// should always validate the returned values before using them.
1408
    ///
1409
    /// It is up to the caller to ensure that the `RawTable` outlives the
1410
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1411
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1412
    #[cfg_attr(feature = "inline-more", inline)]
1413
0
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1414
0
        RawIterHash::new(self, hash)
1415
0
    }
1416
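The "7 bits of the hash" mentioned above are the control-byte tag, often called `h2`: the top seven hash bits, used to reject most non-matching buckets before the element itself is compared. A sketch of that derivation on a 64-bit platform (illustrative; the real `Tag::full` lives in the `control` module):

    fn h2(hash: u64) -> u8 {
        // h1 (the probe start) uses the low bits, h2 the high bits,
        // so the two are nearly independent.
        (hash >> (64 - 7)) as u8
    }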
1417
    /// Returns an iterator over occupied bucket indices that could match a given hash.
1418
    ///
1419
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1420
    /// return items that have a hash value different than the one provided. You
1421
    /// should always validate the returned values before using them.
1422
    ///
1423
    /// It is up to the caller to ensure that the `RawTable` outlives the
1424
    /// `RawIterHashIndices`. Because we cannot make the `next` method unsafe on the
1425
    /// `RawIterHashIndices` struct, we have to make the `iter_hash_buckets` method unsafe.
1426
    #[cfg_attr(feature = "inline-more", inline)]
1427
0
    pub(crate) unsafe fn iter_hash_buckets(&self, hash: u64) -> RawIterHashIndices {
1428
0
        RawIterHashIndices::new(&self.table, hash)
1429
0
    }
1430
1431
    /// Returns an iterator over full bucket indices in the table.
1432
    ///
1433
    /// See [`RawTableInner::full_buckets_indices`] for safety conditions.
1434
    #[inline(always)]
1435
0
    pub(crate) unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
1436
0
        self.table.full_buckets_indices()
1437
0
    }
1438
1439
    /// Returns an iterator which removes all elements from the table without
1440
    /// freeing the memory.
1441
    #[cfg_attr(feature = "inline-more", inline)]
1442
0
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1443
        unsafe {
1444
0
            let iter = self.iter();
1445
0
            self.drain_iter_from(iter)
1446
        }
1447
0
    }
1448
1449
    /// Returns an iterator which removes all elements from the table without
1450
    /// freeing the memory.
1451
    ///
1452
    /// Iteration starts at the provided iterator's current location.
1453
    ///
1454
    /// It is up to the caller to ensure that the iterator is valid for this
1455
    /// `RawTable` and covers all items that remain in the table.
1456
    #[cfg_attr(feature = "inline-more", inline)]
1457
0
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1458
0
        debug_assert_eq!(iter.len(), self.len());
1459
0
        RawDrain {
1460
0
            iter,
1461
0
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1462
0
            orig_table: NonNull::from(&mut self.table),
1463
0
            marker: PhantomData,
1464
0
        }
1465
0
    }
1466
1467
    /// Returns an iterator which consumes all elements from the table.
1468
    ///
1469
    /// Iteration starts at the provided iterator's current location.
1470
    ///
1471
    /// It is up to the caller to ensure that the iterator is valid for this
1472
    /// `RawTable` and covers all items that remain in the table.
1473
0
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1474
0
        debug_assert_eq!(iter.len(), self.len());
1475
1476
0
        let allocation = self.into_allocation();
1477
0
        RawIntoIter {
1478
0
            iter,
1479
0
            allocation,
1480
0
            marker: PhantomData,
1481
0
        }
1482
0
    }
1483
1484
    /// Converts the table into a raw allocation. The contents of the table
1485
    /// should be dropped using a `RawIter` before freeing the allocation.
1486
    #[cfg_attr(feature = "inline-more", inline)]
1487
0
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1488
0
        let alloc = if self.table.is_empty_singleton() {
1489
0
            None
1490
        } else {
1491
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1492
0
            let (layout, ctrl_offset) =
1493
0
                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1494
0
                    Some(lco) => lco,
1495
0
                    None => unsafe { hint::unreachable_unchecked() },
1496
                };
1497
0
            Some((
1498
0
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) },
1499
0
                layout,
1500
0
                unsafe { ptr::read(&self.alloc) },
1501
0
            ))
1502
        };
1503
0
        mem::forget(self);
1504
0
        alloc
1505
0
    }
1506
}
1507
1508
unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1509
where
1510
    T: Send,
1511
    A: Send,
1512
{
1513
}
1514
unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1515
where
1516
    T: Sync,
1517
    A: Sync,
1518
{
1519
}
1520
1521
impl RawTableInner {
1522
    const NEW: Self = RawTableInner::new();
1523
1524
    /// Creates a new empty hash table without allocating any memory.
1525
    ///
1526
    /// In effect this returns a table with exactly 1 bucket. However we can
1527
    /// leave the data pointer dangling since that bucket is never accessed
1528
    /// due to our load factor forcing us to always have at least 1 free bucket.
1529
    #[inline]
1530
0
    const fn new() -> Self {
1531
0
        Self {
1532
0
            // Be careful to cast the entire slice to a raw pointer.
1533
0
            ctrl: unsafe {
1534
0
                NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast())
1535
0
            },
1536
0
            bucket_mask: 0,
1537
0
            items: 0,
1538
0
            growth_left: 0,
1539
0
        }
1540
0
    }
1541
}
1542
1543
/// Finds the previous power of 2. If it's already a power of 2, it's unchanged.
1544
/// Passing zero is undefined behavior.
1545
0
pub(crate) fn prev_pow2(z: usize) -> usize {
1546
0
    let shift = mem::size_of::<usize>() * 8 - 1;
1547
0
    1 << (shift - (z.leading_zeros() as usize))
1548
0
}
1549
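The bit trick can be spot-checked directly; the same computation with a few assertions (mind the zero-input caveat in the doc comment above):

    fn prev_pow2(z: usize) -> usize {
        let shift = usize::BITS as usize - 1;
        1 << (shift - z.leading_zeros() as usize)
    }

    #[test]
    fn prev_pow2_spot_checks() {
        assert_eq!(prev_pow2(1), 1);
        assert_eq!(prev_pow2(7), 4); // 0b111 -> 0b100
        assert_eq!(prev_pow2(8), 8); // already a power of two
    }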
1550
/// Finds the largest number of buckets that can fit in `allocation_size`
1551
/// provided the given TableLayout.
1552
///
1553
/// This relies on some invariants of `capacity_to_buckets`, so only feed in
1554
/// an `allocation_size` calculated from `capacity_to_buckets`.
1555
0
fn maximum_buckets_in(
1556
0
    allocation_size: usize,
1557
0
    table_layout: TableLayout,
1558
0
    group_width: usize,
1559
0
) -> usize {
1560
    // Given an equation like:
1561
    //   z >= x * y + x + g
1562
    // x can be maximized by doing:
1563
    //   x = (z - g) / (y + 1)
1564
    // If you squint:
1565
    //   x is the number of buckets
1566
    //   y is the table_layout.size
1567
    //   z is the size of the allocation
1568
    //   g is the group width
1569
    // But this is ignoring the padding needed for ctrl_align.
1570
    // If we remember these restrictions:
1571
    //   x is always a power of 2
1572
    //   Layout size for T must always be a multiple of T's alignment
1573
    // Then the alignment can be ignored if we add the constraint:
1574
    //   x * y >= table_layout.ctrl_align
1575
    // This is taken care of by `capacity_to_buckets`.
1576
    // It may be helpful to understand this if you remember that:
1577
    //   ctrl_offset = align(x * y, ctrl_align)
1578
0
    let x = (allocation_size - group_width) / (table_layout.size + 1);
1579
0
    prev_pow2(x)
1580
0
}
1581
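A worked instance of the bound above: with `y = 8` (8-byte elements), `g = 16` (group width) and an allocation of `z = 1040` bytes, `x = (1040 - 16) / (8 + 1) = 113`, and `prev_pow2(113) = 64` buckets. Checking against the original inequality: `64 * 8 + 64 + 16 = 592 <= 1040`, so 64 buckets plus their control bytes indeed fit.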
1582
impl RawTableInner {
1583
    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1584
    /// The control bytes and buckets are left uninitialized.
1585
    ///
1586
    /// # Safety
1587
    ///
1588
    /// The caller of this function must ensure that `buckets` is a power of two
1589
    /// and also initialize all control bytes of length `self.bucket_mask + 1 +
1590
    /// Group::WIDTH` with the [`Tag::EMPTY`] bytes.
1591
    ///
1592
    /// See also [`Allocator`] API for other safety concerns.
1593
    ///
1594
    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1595
    #[cfg_attr(feature = "inline-more", inline)]
1596
27.0k
    unsafe fn new_uninitialized<A>(
1597
27.0k
        alloc: &A,
1598
27.0k
        table_layout: TableLayout,
1599
27.0k
        mut buckets: usize,
1600
27.0k
        fallibility: Fallibility,
1601
27.0k
    ) -> Result<Self, TryReserveError>
1602
27.0k
    where
1603
27.0k
        A: Allocator,
1604
    {
1605
27.0k
        debug_assert!(buckets.is_power_of_two());
1606
1607
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1608
27.0k
        let (layout, mut ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1609
27.0k
            Some(lco) => lco,
1610
0
            None => return Err(fallibility.capacity_overflow()),
1611
        };
1612
1613
27.0k
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1614
27.0k
            Ok(block) => {
1615
                // The allocator can't return a value smaller than was
1616
                // requested, so this can be != instead of >=.
1617
27.0k
                if block.len() != layout.size() {
1618
                    // Utilize over-sized allocations.
1619
0
                    let x = maximum_buckets_in(block.len(), table_layout, Group::WIDTH);
1620
0
                    debug_assert!(x >= buckets);
1621
                    // Calculate the new ctrl_offset.
1622
0
                    let (oversized_layout, oversized_ctrl_offset) =
1623
0
                        match table_layout.calculate_layout_for(x) {
1624
0
                            Some(lco) => lco,
1625
0
                            None => unsafe { hint::unreachable_unchecked() },
1626
                        };
1627
0
                    debug_assert!(oversized_layout.size() <= block.len());
1628
0
                    debug_assert!(oversized_ctrl_offset >= ctrl_offset);
1629
0
                    ctrl_offset = oversized_ctrl_offset;
1630
0
                    buckets = x;
1631
27.0k
                }
1632
1633
27.0k
                block.cast()
1634
            }
1635
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1636
        };
1637
1638
        // SAFETY: a null pointer would have been caught by the check above
1639
27.0k
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1640
27.0k
        Ok(Self {
1641
27.0k
            ctrl,
1642
27.0k
            bucket_mask: buckets - 1,
1643
27.0k
            items: 0,
1644
27.0k
            growth_left: bucket_mask_to_capacity(buckets - 1),
1645
27.0k
        })
1646
27.0k
    }
<hashbrown::raw::RawTableInner>::new_uninitialized::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
1596
27.0k
    unsafe fn new_uninitialized<A>(
1597
27.0k
        alloc: &A,
1598
27.0k
        table_layout: TableLayout,
1599
27.0k
        mut buckets: usize,
1600
27.0k
        fallibility: Fallibility,
1601
27.0k
    ) -> Result<Self, TryReserveError>
1602
27.0k
    where
1603
27.0k
        A: Allocator,
1604
    {
1605
27.0k
        debug_assert!(buckets.is_power_of_two());
1606
1607
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1608
27.0k
        let (layout, mut ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1609
27.0k
            Some(lco) => lco,
1610
0
            None => return Err(fallibility.capacity_overflow()),
1611
        };
1612
1613
27.0k
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1614
27.0k
            Ok(block) => {
1615
                // The allocator can't return a value smaller than was
1616
                // requested, so this can be != instead of >=.
1617
27.0k
                if block.len() != layout.size() {
1618
                    // Utilize over-sized allocations.
1619
0
                    let x = maximum_buckets_in(block.len(), table_layout, Group::WIDTH);
1620
0
                    debug_assert!(x >= buckets);
1621
                    // Calculate the new ctrl_offset.
1622
0
                    let (oversized_layout, oversized_ctrl_offset) =
1623
0
                        match table_layout.calculate_layout_for(x) {
1624
0
                            Some(lco) => lco,
1625
0
                            None => unsafe { hint::unreachable_unchecked() },
1626
                        };
1627
0
                    debug_assert!(oversized_layout.size() <= block.len());
1628
0
                    debug_assert!(oversized_ctrl_offset >= ctrl_offset);
1629
0
                    ctrl_offset = oversized_ctrl_offset;
1630
0
                    buckets = x;
1631
27.0k
                }
1632
1633
27.0k
                block.cast()
1634
            }
1635
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1636
        };
1637
1638
        // SAFETY: a null pointer would have been caught by the check above
1639
27.0k
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1640
27.0k
        Ok(Self {
1641
27.0k
            ctrl,
1642
27.0k
            bucket_mask: buckets - 1,
1643
27.0k
            items: 0,
1644
27.0k
            growth_left: bucket_mask_to_capacity(buckets - 1),
1645
27.0k
        })
1646
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<_>
1647
1648
    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1649
    /// capacity for inserting the given number of elements without reallocating.
1650
    ///
1651
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1652
    #[inline]
1653
27.0k
    fn fallible_with_capacity<A>(
1654
27.0k
        alloc: &A,
1655
27.0k
        table_layout: TableLayout,
1656
27.0k
        capacity: usize,
1657
27.0k
        fallibility: Fallibility,
1658
27.0k
    ) -> Result<Self, TryReserveError>
1659
27.0k
    where
1660
27.0k
        A: Allocator,
1661
    {
1662
27.0k
        if capacity == 0 {
1663
0
            Ok(Self::NEW)
1664
        } else {
1665
            // SAFETY: We checked that we could successfully allocate the new table, and then
1666
            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1667
            unsafe {
1668
27.0k
                let buckets = capacity_to_buckets(capacity, table_layout)
1669
27.0k
                    .ok_or_else(|| fallibility.capacity_overflow())?;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<hashbrown::raw::alloc::inner::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>::{closure#0}
1670
1671
27.0k
                let mut result =
1672
27.0k
                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1673
                // SAFETY: We checked that the table is allocated and therefore the table already has
1674
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1675
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1676
27.0k
                result.ctrl_slice().fill_empty();
1677
1678
27.0k
                Ok(result)
1679
            }
1680
        }
1681
27.0k
    }
<hashbrown::raw::RawTableInner>::fallible_with_capacity::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
1653
27.0k
    fn fallible_with_capacity<A>(
1654
27.0k
        alloc: &A,
1655
27.0k
        table_layout: TableLayout,
1656
27.0k
        capacity: usize,
1657
27.0k
        fallibility: Fallibility,
1658
27.0k
    ) -> Result<Self, TryReserveError>
1659
27.0k
    where
1660
27.0k
        A: Allocator,
1661
    {
1662
27.0k
        if capacity == 0 {
1663
0
            Ok(Self::NEW)
1664
        } else {
1665
            // SAFETY: We checked that we could successfully allocate the new table, and then
1666
            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1667
            unsafe {
1668
27.0k
                let buckets = capacity_to_buckets(capacity, table_layout)
1669
27.0k
                    .ok_or_else(|| fallibility.capacity_overflow())?;
1670
1671
27.0k
                let mut result =
1672
27.0k
                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1673
                // SAFETY: We checked that the table is allocated and therefore the table already has
1674
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1675
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1676
27.0k
                result.ctrl_slice().fill_empty();
1677
1678
27.0k
                Ok(result)
1679
            }
1680
        }
1681
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>
1682
1683
    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1684
    /// the given number of elements without reallocating.
1685
    ///
1686
    /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`]s the program
1687
    /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to
1688
    /// handle memory allocation failure.
1689
    ///
1690
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1691
    ///
1692
    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1693
    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1694
0
    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1695
0
    where
1696
0
        A: Allocator,
1697
    {
1698
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1699
0
        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1700
0
            Ok(table_inner) => table_inner,
1701
            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1702
0
            Err(_) => unsafe { hint::unreachable_unchecked() },
1703
        }
1704
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<_>
1705
1706
    /// Fixes up an insertion index returned by the [`RawTableInner::find_insert_index_in_group`] method.
1707
    ///
1708
    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1709
    /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately
1710
    /// trigger a match in the [`RawTableInner::find_insert_index_in_group`] function. This is because
1711
    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1712
    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1713
    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1714
    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1715
    /// trailing control bytes (containing [`Tag::EMPTY`] bytes).
1716
    ///
1717
    /// If this function is called correctly, it is guaranteed to return an index of an empty or
1718
    /// deleted bucket in the range `0..self.buckets()` (see `Warning` and `Safety`).
1719
    ///
1720
    /// # Warning
1721
    ///
1722
    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1723
    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1724
    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1725
    /// index will cause immediate [`undefined behavior`].
1726
    ///
1727
    /// # Safety
1728
    ///
1729
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1730
    /// Thus, in order to uphold those safety contracts, as well as for the correct logic
1731
    /// of this crate, the following rules are necessary and sufficient:
1732
    ///
1733
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1734
    ///   function results in [`undefined behavior`].
1735
    ///
1736
    /// * This function must only be used on insertion indices found by [`RawTableInner::find_insert_index_in_group`]
1737
    ///   (after the `find_insert_index_in_group` function, but before insertion into the table).
1738
    ///
1739
    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1740
    ///   (this one is provided by the [`RawTableInner::find_insert_index_in_group`] function).
1741
    ///
1742
    /// Calling this function with an index not provided by [`RawTableInner::find_insert_index_in_group`]
1743
    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1744
    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1745
    ///
1746
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1747
    /// [`RawTableInner::find_insert_index_in_group`]: RawTableInner::find_insert_index_in_group
1748
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1749
    #[inline]
1750
1.20M
    unsafe fn fix_insert_index(&self, mut index: usize) -> usize {
1751
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1752
1.20M
        if unlikely(self.is_bucket_full(index)) {
1753
1.68k
            debug_assert!(self.bucket_mask < Group::WIDTH);
1754
            // SAFETY:
1755
            //
1756
            // * Since the caller of this function ensures that the control bytes are properly
1757
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1758
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1759
            //   and points to the properly initialized control bytes (see also
1760
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1761
            //
1762
            // * Because the caller of this function ensures that the index was provided by the
1763
            //   `self.find_insert_index_in_group()` function, so for tables larger than the
1764
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1765
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_index_in_group`
1766
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1767
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1768
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1769
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1770
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1771
1.68k
            index = Group::load_aligned(self.ctrl(0))
1772
1.68k
                .match_empty_or_deleted()
1773
1.68k
                .lowest_set_bit()
1774
1.68k
                .unwrap_unchecked();
1775
1.20M
        }
1776
1.20M
        index
1777
1.20M
    }
<hashbrown::raw::RawTableInner>::fix_insert_index
Line
Count
Source
1750
1.20M
    unsafe fn fix_insert_index(&self, mut index: usize) -> usize {
1751
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1752
1.20M
        if unlikely(self.is_bucket_full(index)) {
1753
1.68k
            debug_assert!(self.bucket_mask < Group::WIDTH);
1754
            // SAFETY:
1755
            //
1756
            // * Since the caller of this function ensures that the control bytes are properly
1757
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1758
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1759
            //   and points to the properly initialized control bytes (see also
1760
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1761
            //
1762
            // * Because the caller of this function ensures that the index was provided by the
1763
            //   `self.find_insert_index_in_group()` function, so for tables larger than the
1764
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1765
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_index_in_group`
1766
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1767
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1768
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1769
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1770
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1771
1.68k
            index = Group::load_aligned(self.ctrl(0))
1772
1.68k
                .match_empty_or_deleted()
1773
1.68k
                .lowest_set_bit()
1774
1.68k
                .unwrap_unchecked();
1775
1.20M
        }
1776
1.20M
        index
1777
1.20M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_index
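The branch above only fires for tables smaller than the group width. A standalone scalar sketch of the failure mode it repairs, using an assumed tag encoding (EMPTY = 0xFF, FULL tags have the high bit clear) rather than hashbrown's real types: a group load that spills into the trailing mirror bytes reports an "empty" bit whose masked index lands on a FULL bucket, and a rescan from bucket 0 finds the genuinely free slot.

```rust
const EMPTY: u8 = 0xFF;
const GROUP_WIDTH: usize = 16;

/// Scalar stand-in for `group.match_empty_or_deleted().lowest_set_bit()`:
/// EMPTY (0xFF) and DELETED (0x80) both have the high bit set.
fn lowest_empty_or_deleted(ctrl: &[u8], pos: usize) -> Option<usize> {
    (0..GROUP_WIDTH).find(|&bit| ctrl[pos + bit] & 0x80 != 0)
}

fn main() {
    let buckets = 4;
    let bucket_mask = buckets - 1;
    // `buckets` real control bytes plus GROUP_WIDTH trailing bytes; for small
    // tables, positions `buckets..GROUP_WIDTH` stay EMPTY and the real bytes
    // are mirrored at `GROUP_WIDTH..GROUP_WIDTH + buckets`.
    let mut ctrl = vec![EMPTY; buckets + GROUP_WIDTH];
    for i in [0usize, 1, 3] {
        ctrl[i] = 0x11;               // FULL tag (high bit clear)
        ctrl[i + GROUP_WIDTH] = 0x11; // mirrored trailing byte
    }
    // Probe from bucket 3: the 16-byte load spills into the trailing EMPTY
    // bytes, so bit 1 (ctrl[4]) wins even though bucket 2 is the empty one.
    let pos = 3;
    let bit = lowest_empty_or_deleted(&ctrl, pos).unwrap();
    let index = (pos + bit) & bucket_mask;
    assert_eq!(index, 0);
    assert!(ctrl[index] & 0x80 == 0, "false positive: bucket 0 is FULL");
    // The fix: rescan the (single) group from the start of the table.
    let fixed = lowest_empty_or_deleted(&ctrl, 0).unwrap();
    assert_eq!(fixed, 2); // the genuinely empty bucket
}
```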
1778
1779
    /// Finds the position to insert something in a group.
1780
    ///
1781
    /// **This may have false positives and must be fixed up with `fix_insert_index`
1782
    /// before it's used.**
1783
    ///
1784
    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1785
    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1786
    #[inline]
1787
1.23M
    fn find_insert_index_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1788
1.23M
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1789
1790
1.23M
        if likely(bit.is_some()) {
1791
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1792
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1793
1.20M
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1794
        } else {
1795
30.4k
            None
1796
        }
1797
1.23M
    }
<hashbrown::raw::RawTableInner>::find_insert_index_in_group
Line
Count
Source
1787
1.23M
    fn find_insert_index_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1788
1.23M
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1789
1790
1.23M
        if likely(bit.is_some()) {
1791
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1792
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1793
1.20M
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1794
        } else {
1795
30.4k
            None
1796
        }
1797
1.23M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_index_in_group
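The comment inside the function leans on a small identity: masking with `buckets - 1` equals reducing modulo `buckets`, but only because the bucket count is a power of two. A quick standalone check:

```rust
fn main() {
    let buckets: usize = 8; // always a power of two in this table
    let bucket_mask = buckets - 1;
    for pos in 0..buckets {
        for bit in 0..16 {
            // `& mask` and `% buckets` agree for every probe position and bit
            assert_eq!((pos + bit) & bucket_mask, (pos + bit) % buckets);
        }
    }
}
```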
1798
1799
    /// Searches for an element in the table, or a potential slot where that element could
1800
    /// be inserted (an empty or deleted [`Bucket`] index).
1801
    ///
1802
    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1803
    /// eliminated by LLVM optimizations.
1804
    ///
1805
    /// This function does not make any changes to the `data` part of the table, or any
1806
    /// changes to the `items` or `growth_left` field of the table.
1807
    ///
1808
    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1809
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1810
    /// will never return (will go into an infinite loop) for tables larger than the group
1811
    /// width, or return an index outside of the table indices range if the table is less
1812
    /// than the group width.
1813
    ///
1814
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1815
    /// function with only `FULL` buckets' indices and return the `index` of the found
1816
    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1817
    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1818
    /// an index in the range `0..self.buckets()`, but in any case, if this function
1819
    /// returns `Err`, it will contain an index in the range `0..=self.buckets()`.
1820
    ///
1821
    /// # Safety
1822
    ///
1823
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1824
    /// this function results in [`undefined behavior`].
1825
    ///
1826
    /// Attempt to write data at the index returned by this function when the table is less than
1827
    /// the group width and if there was not at least one empty or deleted bucket in the table
1828
    /// will cause immediate [`undefined behavior`]. This is because in this case the function
1829
    /// will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`] control
1830
    /// bytes outside the table range.
1831
    ///
1832
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1833
    #[inline]
1834
421k
    unsafe fn find_or_find_insert_index_inner(
1835
421k
        &self,
1836
421k
        hash: u64,
1837
421k
        eq: &mut dyn FnMut(usize) -> bool,
1838
421k
    ) -> Result<usize, usize> {
1839
421k
        let mut insert_index = None;
1840
1841
421k
        let tag_hash = Tag::full(hash);
1842
421k
        let mut probe_seq = self.probe_seq(hash);
1843
1844
        loop {
1845
            // SAFETY:
1846
            // * Caller of this function ensures that the control bytes are properly initialized.
1847
            //
1848
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1849
            //   of the table due to masking with `self.bucket_mask` and also because the number
1850
            //   of buckets is a power of two (see `self.probe_seq` function).
1851
            //
1852
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1853
            //   call `Group::load` due to the extended control bytes range, which is
1854
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1855
            //   byte will never be read for the allocated table);
1856
            //
1857
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1858
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1859
            //   bytes, which is safe (see RawTableInner::new).
1860
447k
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1861
1862
447k
            for bit in group.match_tag(tag_hash) {
1863
34.0k
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1864
1865
34.0k
                if likely(eq(index)) {
1866
0
                    return Ok(index);
1867
34.0k
                }
1868
            }
1869
1870
            // We didn't find the element we were looking for in the group; try to get an
1871
            // insertion slot from the group if we don't have one yet.
1872
447k
            if likely(insert_index.is_none()) {
1873
447k
                insert_index = self.find_insert_index_in_group(&group, &probe_seq);
1874
447k
            }
1875
1876
447k
            if let Some(insert_index) = insert_index {
1877
                // Only stop the search if the group contains at least one empty element.
1878
                // Otherwise, the element that we are looking for might be in a following group.
1879
421k
                if likely(group.match_empty().any_bit_set()) {
1880
                    // We must have found an insert slot by now, since the current group contains at
1881
                    // least one. For tables smaller than the group width, there will still be an
1882
                    // empty element in the current (and only) group due to the load factor.
1883
                    unsafe {
1884
                        // SAFETY:
1885
                        // * Caller of this function ensures that the control bytes are properly initialized.
1886
                        //
1887
                        // * We use this function with the index found by `self.find_insert_index_in_group`
1888
421k
                        return Err(self.fix_insert_index(insert_index));
1889
                    }
1890
3
                }
1891
25.8k
            }
1892
1893
25.8k
            probe_seq.move_next(self.bucket_mask);
1894
        }
1895
421k
    }
<hashbrown::raw::RawTableInner>::find_or_find_insert_index_inner
Line
Count
Source
1834
421k
    unsafe fn find_or_find_insert_index_inner(
1835
421k
        &self,
1836
421k
        hash: u64,
1837
421k
        eq: &mut dyn FnMut(usize) -> bool,
1838
421k
    ) -> Result<usize, usize> {
1839
421k
        let mut insert_index = None;
1840
1841
421k
        let tag_hash = Tag::full(hash);
1842
421k
        let mut probe_seq = self.probe_seq(hash);
1843
1844
        loop {
1845
            // SAFETY:
1846
            // * Caller of this function ensures that the control bytes are properly initialized.
1847
            //
1848
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1849
            //   of the table due to masking with `self.bucket_mask` and also because the number
1850
            //   of buckets is a power of two (see `self.probe_seq` function).
1851
            //
1852
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1853
            //   call `Group::load` due to the extended control bytes range, which is
1854
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1855
            //   byte will never be read for the allocated table);
1856
            //
1857
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1858
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1859
            //   bytes, which is safe (see RawTableInner::new).
1860
447k
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1861
1862
447k
            for bit in group.match_tag(tag_hash) {
1863
34.0k
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1864
1865
34.0k
                if likely(eq(index)) {
1866
0
                    return Ok(index);
1867
34.0k
                }
1868
            }
1869
1870
            // We didn't find the element we were looking for in the group; try to get an
1871
            // insertion slot from the group if we don't have one yet.
1872
447k
            if likely(insert_index.is_none()) {
1873
447k
                insert_index = self.find_insert_index_in_group(&group, &probe_seq);
1874
447k
            }
1875
1876
447k
            if let Some(insert_index) = insert_index {
1877
                // Only stop the search if the group contains at least one empty element.
1878
                // Otherwise, the element that we are looking for might be in a following group.
1879
421k
                if likely(group.match_empty().any_bit_set()) {
1880
                    // We must have found an insert slot by now, since the current group contains at
1881
                    // least one. For tables smaller than the group width, there will still be an
1882
                    // empty element in the current (and only) group due to the load factor.
1883
                    unsafe {
1884
                        // SAFETY:
1885
                        // * Caller of this function ensures that the control bytes are properly initialized.
1886
                        //
1887
                        // * We use this function with the index found by `self.find_insert_index_in_group`
1888
421k
                        return Err(self.fix_insert_index(insert_index));
1889
                    }
1890
3
                }
1891
25.8k
            }
1892
1893
25.8k
            probe_seq.move_next(self.bucket_mask);
1894
        }
1895
421k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_index_inner
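At the public-API level this Ok/Err split surfaces as `Entry::Occupied` versus `Entry::Vacant`. A usage sketch, assuming `hashbrown` (with default features) as a dependency and `std`'s `RandomState` for hashing:

```rust
use hashbrown::hash_table::{Entry, HashTable};
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let state = RandomState::new();
    let mut table: HashTable<u64> = HashTable::new();
    let value = 42u64;
    let hash = state.hash_one(value);
    // One probe either finds the element (Occupied) or an insert slot (Vacant).
    match table.entry(hash, |&v| v == value, |&v| state.hash_one(v)) {
        Entry::Occupied(_) => unreachable!("the table starts empty"),
        Entry::Vacant(slot) => {
            slot.insert(value);
        }
    }
    assert!(table.find(hash, |&v| v == value).is_some());
}
```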
1896
1897
    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1898
    /// element and sets the hash for that slot. Returns an index of that slot and the
1899
    /// old control byte stored in the found index.
1900
    ///
1901
    /// This function does not check if the given element exists in the table. Also,
1902
    /// this function does not check if there is enough space in the table to insert
1903
    /// a new element. The caller of the function must make sure that the table has at
1904
    /// least 1 empty or deleted `bucket`, otherwise this function will never return
1905
    /// (will go into an infinite loop) for tables larger than the group width, or
1906
    /// return an index outside of the table indices range if the table is less than
1907
    /// the group width.
1908
    ///
1909
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1910
    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
1911
    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
1912
    ///
1913
    /// This function does not make any changes to the `data` parts of the table,
1914
    /// or any changes to the `items` or `growth_left` field of the table.
1915
    ///
1916
    /// # Safety
1917
    ///
1918
    /// The safety rules are directly derived from the safety rules for the
1919
    /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_index`] methods.
1920
    /// Thus, in order to uphold the safety contracts for those methods, as well as for
1921
    /// the correct logic of this crate, you must observe the following rules
1922
    /// when calling this function:
1923
    ///
1924
    /// * The [`RawTableInner`] has already been allocated and has properly initialized
1925
    ///   control bytes otherwise calling this function results in [`undefined behavior`].
1926
    ///
1927
    /// * The caller of this function must ensure that the "data" parts of the table
1928
    ///   will have an entry at the returned index (matching the given hash) right
1929
    ///   after calling this function.
1930
    ///
1931
    /// Attempt to write data at the `index` returned by this function when the table is
1932
    /// less than the group width and if there was not at least one empty or deleted bucket in
1933
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1934
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1935
    /// control bytes outside the table range.
1936
    ///
1937
    /// The caller must independently increase the `items` field of the table, and also,
1938
    /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left`
1939
    /// field, and do not change it if the old control byte was [`Tag::DELETED`].
1940
    ///
1941
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
1942
    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
1943
    ///
1944
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1945
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1946
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1947
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
1948
    /// [`RawTableInner::find_insert_index`]: RawTableInner::find_insert_index
1949
    #[inline]
1950
725k
    unsafe fn prepare_insert_index(&mut self, hash: u64) -> (usize, Tag) {
1951
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1952
725k
        let index: usize = self.find_insert_index(hash);
1953
        // SAFETY:
1954
        // 1. The `find_insert_index` function either returns an `index` less than or
1955
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1956
        //    returns if it cannot find an empty or deleted slot.
1957
        // 2. The caller of this function guarantees that the table has already been
1958
        //    allocated
1959
725k
        let old_ctrl = *self.ctrl(index);
1960
725k
        self.set_ctrl_hash(index, hash);
1961
725k
        (index, old_ctrl)
1962
725k
    }
<hashbrown::raw::RawTableInner>::prepare_insert_index
Line
Count
Source
1950
725k
    unsafe fn prepare_insert_index(&mut self, hash: u64) -> (usize, Tag) {
1951
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1952
725k
        let index: usize = self.find_insert_index(hash);
1953
        // SAFETY:
1954
        // 1. The `find_insert_index` function either returns an `index` less than or
1955
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1956
        //    returns if it cannot find an empty or deleted slot.
1957
        // 2. The caller of this function guarantees that the table has already been
1958
        //    allocated
1959
725k
        let old_ctrl = *self.ctrl(index);
1960
725k
        self.set_ctrl_hash(index, hash);
1961
725k
        (index, old_ctrl)
1962
725k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_index
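The final safety clause above leaves the counter updates to the caller. A minimal sketch of that bookkeeping with a hypothetical `Counters` type and assumed byte values standing in for `Tag::EMPTY` and `Tag::DELETED`:

```rust
const EMPTY: u8 = 0xFF;   // assumed stand-in for Tag::EMPTY
const DELETED: u8 = 0x80; // assumed stand-in for Tag::DELETED

struct Counters {
    items: usize,
    growth_left: usize,
}

impl Counters {
    fn record_insert(&mut self, old_ctrl: u8) {
        self.items += 1; // every insert adds one element
        if old_ctrl == EMPTY {
            self.growth_left -= 1; // an EMPTY slot consumes growth budget
        }
        // old_ctrl == DELETED: a tombstone was reused, budget unchanged.
    }
}

fn main() {
    let mut c = Counters { items: 0, growth_left: 3 };
    c.record_insert(EMPTY);
    c.record_insert(DELETED);
    assert_eq!((c.items, c.growth_left), (2, 2));
}
```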
1963
1964
    /// Searches for an empty or deleted bucket which is suitable for inserting
1965
    /// a new element, returning the `index` for the new [`Bucket`].
1966
    ///
1967
    /// This function does not make any changes to the `data` part of the table, or any
1968
    /// changes to the `items` or `growth_left` field of the table.
1969
    ///
1970
    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1971
    /// will never return (will go into an infinite loop) for tables larger than the group
1972
    /// width, or return an index outside of the table indices range if the table is less
1973
    /// than the group width.
1974
    ///
1975
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1976
    /// guaranteed to return an index in the range `0..self.buckets()`, but in any case,
1977
    /// it will contain an index in the range `0..=self.buckets()`.
1978
    ///
1979
    /// # Safety
1980
    ///
1981
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1982
    /// this function results in [`undefined behavior`].
1983
    ///
1984
    /// Attempt to write data at the index returned by this function when the table is
1985
    /// less than the group width and if there was not at least one empty or deleted bucket in
1986
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1987
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1988
    /// control bytes outside the table range.
1989
    ///
1990
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1991
    #[inline]
1992
786k
    unsafe fn find_insert_index(&self, hash: u64) -> usize {
1993
786k
        let mut probe_seq = self.probe_seq(hash);
1994
        loop {
1995
            // SAFETY:
1996
            // * Caller of this function ensures that the control bytes are properly initialized.
1997
            //
1998
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1999
            //   of the table due to masking with `self.bucket_mask` and also because the number
2000
            //   of buckets is a power of two (see `self.probe_seq` function).
2001
            //
2002
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2003
            //   call `Group::load` due to the extended control bytes range, which is
2004
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2005
            //   byte will never be read for the allocated table);
2006
            //
2007
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2008
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2009
            //   bytes, which is safe (see RawTableInner::new).
2010
791k
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2011
2012
791k
            let index = self.find_insert_index_in_group(&group, &probe_seq);
2013
791k
            if likely(index.is_some()) {
2014
                // SAFETY:
2015
                // * Caller of this function ensures that the control bytes are properly initialized.
2016
                //
2017
                // * We use this function with the slot / index found by `self.find_insert_index_in_group`
2018
                unsafe {
2019
786k
                    return self.fix_insert_index(index.unwrap_unchecked());
2020
                }
2021
4.63k
            }
2022
4.63k
            probe_seq.move_next(self.bucket_mask);
2023
        }
2024
786k
    }
<hashbrown::raw::RawTableInner>::find_insert_index
Line
Count
Source
1992
786k
    unsafe fn find_insert_index(&self, hash: u64) -> usize {
1993
786k
        let mut probe_seq = self.probe_seq(hash);
1994
        loop {
1995
            // SAFETY:
1996
            // * Caller of this function ensures that the control bytes are properly initialized.
1997
            //
1998
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1999
            //   of the table due to masking with `self.bucket_mask` and also because the number
2000
            //   of buckets is a power of two (see `self.probe_seq` function).
2001
            //
2002
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2003
            //   call `Group::load` due to the extended control bytes range, which is
2004
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2005
            //   byte will never be read for the allocated table);
2006
            //
2007
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2008
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2009
            //   bytes, which is safe (see RawTableInner::new).
2010
791k
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2011
2012
791k
            let index = self.find_insert_index_in_group(&group, &probe_seq);
2013
791k
            if likely(index.is_some()) {
2014
                // SAFETY:
2015
                // * Caller of this function ensures that the control bytes are properly initialized.
2016
                //
2017
                // * We use this function with the slot / index found by `self.find_insert_index_in_group`
2018
                unsafe {
2019
786k
                    return self.fix_insert_index(index.unwrap_unchecked());
2020
                }
2021
4.63k
            }
2022
4.63k
            probe_seq.move_next(self.bucket_mask);
2023
        }
2024
786k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_index
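This loop terminates only because the probe sequence eventually visits every group: its offsets grow as triangular numbers, which enumerate all residues modulo a power of two. A unit-stride sketch of that property (the real sequence steps by whole groups):

```rust
fn main() {
    let buckets = 16usize; // a power of two, as the table guarantees
    let mask = buckets - 1;
    let mut pos = 5 & mask; // arbitrary start, like h1(hash) & mask
    let mut stride = 0;
    let mut seen = vec![false; buckets];
    for _ in 0..buckets {
        assert!(!seen[pos], "a position was visited twice");
        seen[pos] = true;
        stride += 1;                 // stride grows by one each step...
        pos = (pos + stride) & mask; // ...so offsets are triangular numbers
    }
    assert!(seen.iter().all(|&v| v)); // every position visited exactly once
}
```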
2025
2026
    /// Searches for an element in a table, returning the `index` of the found element.
2027
    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
2028
    /// eliminated by LLVM optimizations.
2029
    ///
2030
    /// This function does not make any changes to the `data` part of the table, or any
2031
    /// changes to the `items` or `growth_left` field of the table.
2032
    ///
2033
    /// The table must have at least 1 empty `bucket`, otherwise, if the
2034
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
2035
    /// this function will also never return (will go into an infinite loop).
2036
    ///
2037
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
2038
    /// function with only `FULL` buckets' indices and return the `index` of the found
2039
    /// element as `Some(index)`, so the index will always be in the range
2040
    /// `0..self.buckets()`.
2041
    ///
2042
    /// # Safety
2043
    ///
2044
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
2045
    /// this function results in [`undefined behavior`].
2046
    ///
2047
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2048
    #[inline(always)]
2049
1.15M
    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
2050
1.15M
        let tag_hash = Tag::full(hash);
2051
1.15M
        let mut probe_seq = self.probe_seq(hash);
2052
2053
        loop {
2054
            // SAFETY:
2055
            // * Caller of this function ensures that the control bytes are properly initialized.
2056
            //
2057
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
2058
            //   of the table due to masking with `self.bucket_mask`.
2059
            //
2060
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2061
            //   call `Group::load` due to the extended control bytes range, which is
2062
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2063
            //   byte will never be read for the allocated table);
2064
            //
2065
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2066
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2067
            //   bytes, which is safe (see RawTableInner::new_in).
2068
1.21M
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2069
2070
1.21M
            for bit in group.match_tag(tag_hash) {
2071
                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
2072
                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2073
993k
                let index = (probe_seq.pos + bit) & self.bucket_mask;
2074
2075
993k
                if likely(eq(index)) {
2076
974k
                    return Some(index);
2077
19.2k
                }
2078
            }
2079
2080
243k
            if likely(group.match_empty().any_bit_set()) {
2081
185k
                return None;
2082
58.5k
            }
2083
2084
58.5k
            probe_seq.move_next(self.bucket_mask);
2085
        }
2086
1.15M
    }
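A scalar sketch of the early-return rule at the end of the loop above: a lookup may stop at the first group containing an EMPTY byte, because an insert of the sought element would have claimed a slot no later than that group. The tag encoding is assumed (EMPTY = 0xFF, FULL tags with the high bit clear):

```rust
const EMPTY: u8 = 0xFF;

fn group_has_empty(group: &[u8]) -> bool {
    group.iter().any(|&b| b == EMPTY)
}

fn main() {
    // Two 4-byte "groups" along a probe sequence: the first is fully
    // occupied (forcing the probe onward), the second has a hole.
    let groups: [[u8; 4]; 2] = [[0x11, 0x22, 0x33, 0x44], [0x55, EMPTY, 0x66, 0x77]];
    let mut probed = 0;
    for group in &groups {
        probed += 1;
        if group_has_empty(group) {
            break; // element absent: no need to probe further groups
        }
    }
    assert_eq!(probed, 2);
}
```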
2087
2088
    /// Prepares for rehashing data in place (that is, without allocating new memory).
2089
    /// Converts all `FULL` control bytes to `Tag::DELETED` and all `Tag::DELETED` control
2090
    /// bytes to `Tag::EMPTY`, i.e. performs the following conversion:
2091
    ///
2092
    /// - `Tag::EMPTY` control bytes   -> `Tag::EMPTY`;
2093
    /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`;
2094
    /// - `FULL` control bytes    -> `Tag::DELETED`.
2095
    ///
2096
    /// This function does not make any changes to the `data` parts of the table,
2097
    /// or any changes to the `items` or `growth_left` field of the table.
2098
    ///
2099
    /// # Safety
2100
    ///
2101
    /// You must observe the following safety rules when calling this function:
2102
    ///
2103
    /// * The [`RawTableInner`] has already been allocated;
2104
    ///
2105
    /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL`
2106
    ///   bytes when re-inserting them into their ideal position (which was impossible
2107
    ///   to do during the first insert due to tombstones). If the caller does not do
2108
    ///   this, then calling this function may result in a memory leak.
2109
    ///
2110
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
2111
    ///   calling this function results in [`undefined behavior`].
2112
    ///
2113
    /// Calling this function on a table that has not been allocated results in
2114
    /// [`undefined behavior`].
2115
    ///
2116
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2117
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2118
    ///
2119
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2120
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2121
    #[allow(clippy::mut_mut)]
2122
    #[inline]
2123
0
    unsafe fn prepare_rehash_in_place(&mut self) {
2124
        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
2125
        // This effectively frees up all buckets containing a DELETED entry.
2126
        //
2127
        // SAFETY:
2128
        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
2129
        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
2130
        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
2131
        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
2132
        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
2133
        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
2134
0
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
2135
0
            let group = Group::load_aligned(self.ctrl(i));
2136
0
            let group = group.convert_special_to_empty_and_full_to_deleted();
2137
0
            group.store_aligned(self.ctrl(i));
2138
0
        }
2139
2140
        // Fix up the trailing control bytes. See the comments in set_ctrl
2141
        // for the handling of tables smaller than the group width.
2142
        //
2143
        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
2144
        // has already been allocated
2145
0
        if unlikely(self.buckets() < Group::WIDTH) {
2146
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
2147
0
            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
2148
0
            // `Group::WIDTH` is safe
2149
0
            self.ctrl(0)
2150
0
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
2151
0
        } else {
2152
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
2153
0
            // control bytes,so copying `Group::WIDTH` bytes with offset equal
2154
0
            // to `self.buckets() == self.bucket_mask + 1` is safe
2155
0
            self.ctrl(0)
2156
0
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2157
0
        }
2158
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
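A scalar sketch of the per-byte mapping listed above, which the real code applies a whole `Group` at a time via `convert_special_to_empty_and_full_to_deleted`; the byte values are assumed stand-ins for `Tag::EMPTY` and `Tag::DELETED`:

```rust
const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0x80;

fn convert(byte: u8) -> u8 {
    if byte & 0x80 != 0 {
        EMPTY // was EMPTY or DELETED: the bucket is now free
    } else {
        DELETED // was FULL: marked for re-insertion into its ideal slot
    }
}

fn main() {
    assert_eq!(convert(EMPTY), EMPTY);
    assert_eq!(convert(DELETED), EMPTY);
    assert_eq!(convert(0x2A), DELETED); // an arbitrary FULL tag
}
```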
2159
2160
    /// Returns an iterator over every element in the table.
2161
    ///
2162
    /// # Safety
2163
    ///
2164
    /// If any of the following conditions are violated, the result
2165
    /// is [`undefined behavior`]:
2166
    ///
2167
    /// * The caller has to ensure that the `RawTableInner` outlives the
2168
    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2169
    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2170
    ///
2171
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2172
    ///
2173
    /// The type `T` must be the actual type of the elements stored in the table,
2174
    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2175
    ///
2176
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2177
    #[inline]
2178
0
    unsafe fn iter<T>(&self) -> RawIter<T> {
2179
        // SAFETY:
2180
        // 1. Since the caller of this function ensures that the control bytes
2181
        //    are properly initialized and `self.data_end()` points to the start
2182
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2183
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2184
        //    control bytes.
2185
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2186
        //    equal to zero).
2187
        // 3. We pass the exact value of buckets of the table to the function.
2188
        //
2189
        //                         `ctrl` points here (to the start
2190
        //                         of the first control byte `CT0`)
2191
        //                          ∨
2192
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2193
        //                           \________  ________/
2194
        //                                    \/
2195
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2196
        //
2197
        // where: T0...T_n  - our stored data;
2198
        //        CT0...CT_n - control bytes or metadata for `data`.
2199
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2200
        //                        with loading `Group` bytes from the heap works properly, even if the result
2201
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2202
        //                        `RawTableInner::set_ctrl` function.
2203
        //
2204
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2205
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2206
0
        let data = Bucket::from_base_index(self.data_end(), 0);
2207
0
        RawIter {
2208
0
            // SAFETY: See explanation above
2209
0
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2210
0
            items: self.items,
2211
0
        }
2212
0
    }
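At the public level this iterator backs plain table iteration. A usage sketch with hashbrown's public `HashTable` (assumed as a dependency with default features):

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let state = RandomState::new();
    let mut table: HashTable<u32> = HashTable::new();
    for v in [1u32, 2, 3] {
        table.insert_unique(state.hash_one(v), v, |&v| state.hash_one(v));
    }
    let mut items: Vec<u32> = table.iter().copied().collect();
    items.sort_unstable(); // iteration order follows bucket layout, not insertion
    assert_eq!(items, [1, 2, 3]);
}
```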
2213
2214
    /// Executes the destructors (if any) of the values stored in the table.
2215
    ///
2216
    /// # Note
2217
    ///
2218
    /// This function does not erase the control bytes of the table and does
2219
    /// not make any changes to the `items` or `growth_left` fields of the
2220
    /// table. If necessary, the caller of this function must manually set
2221
    /// up these table fields, for example using the [`clear_no_drop`] function.
2222
    ///
2223
    /// Be careful when calling this function, because the drop function of
2224
    /// the elements can panic, and this can leave the table in an inconsistent
2225
    /// state.
2226
    ///
2227
    /// # Safety
2228
    ///
2229
    /// The type `T` must be the actual type of the elements stored in the table,
2230
    /// otherwise calling this function may result in [`undefined behavior`].
2231
    ///
2232
    /// If `T` is a type that should be dropped and **the table is not empty**,
2233
    /// calling this function more than once results in [`undefined behavior`].
2234
    ///
2235
    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2236
    /// calling this function may result in [`undefined behavior`].
2237
    ///
2238
    /// It is safe to call this function on a table that has not been allocated,
2239
    /// on a table with uninitialized control bytes, and on a table with no actual
2240
    /// data but with `Full` control bytes if `self.items == 0`.
2241
    ///
2242
    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2243
    /// about properly removing or saving `element` from / into the [`RawTable`] /
2244
    /// [`RawTableInner`].
2245
    ///
2246
    /// [`Bucket::drop`]: Bucket::drop
2247
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2248
    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2249
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2250
6.43k
    unsafe fn drop_elements<T>(&mut self) {
2251
        // Check that `self.items != 0`. Protects against the possibility
2252
        // of creating an iterator on a table with uninitialized control bytes.
2253
6.43k
        if T::NEEDS_DROP && self.items != 0 {
2254
            // SAFETY: We know for sure that RawTableInner will outlive the
2255
            // returned `RawIter` iterator, and the caller of this function
2256
            // must uphold the safety contract for `drop_elements` method.
2257
0
            for item in self.iter::<T>() {
2258
0
                // SAFETY: The caller must uphold the safety contract for
2259
0
                // `drop_elements` method.
2260
0
                item.drop();
2261
0
            }
2262
6.43k
        }
2263
6.43k
    }
<hashbrown::raw::RawTableInner>::drop_elements::<usize>
Line
Count
Source
2250
6.43k
    unsafe fn drop_elements<T>(&mut self) {
2251
        // Check that `self.items != 0`. Protects against the possibility
2252
        // of creating an iterator on a table with uninitialized control bytes.
2253
6.43k
        if T::NEEDS_DROP && self.items != 0 {
2254
            // SAFETY: We know for sure that RawTableInner will outlive the
2255
            // returned `RawIter` iterator, and the caller of this function
2256
            // must uphold the safety contract for `drop_elements` method.
2257
0
            for item in self.iter::<T>() {
2258
0
                // SAFETY: The caller must uphold the safety contract for
2259
0
                // `drop_elements` method.
2260
0
                item.drop();
2261
0
            }
2262
6.43k
        }
2263
6.43k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<_>
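The `T::NEEDS_DROP` guard (backed by `mem::needs_drop`) is why the loop body records zero executions for `drop_elements::<usize>` even though the function itself ran 6.43k times: `usize` has no destructor, so the iteration compiles away. A tiny illustration with the std equivalent:

```rust
fn main() {
    // `usize` needs no destructor, so a table of them skips iteration entirely;
    // `String` does, so each element must be visited.
    assert!(!std::mem::needs_drop::<usize>());
    assert!(std::mem::needs_drop::<String>());
}
```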
2264
2265
    /// Executes the destructors (if any) of the values stored in the table and then
2266
    /// deallocates the table.
2267
    ///
2268
    /// # Note
2269
    ///
2270
    /// Calling this function automatically invalidates (leaves dangling) all instances of
2271
    /// buckets ([`Bucket`]) as well as the `ctrl` field of the table.
2272
    ///
2273
    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2274
    /// fields of the table. If necessary, the caller of this function must manually set
2275
    /// up these table fields.
2276
    ///
2277
    /// # Safety
2278
    ///
2279
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2280
    ///
2281
    /// * Calling this function more than once;
2282
    ///
2283
    /// * The type `T` must be the actual type of the elements stored in the table.
2284
    ///
2285
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2286
    ///   to allocate this table.
2287
    ///
2288
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2289
    ///   was used to allocate this table.
2290
    ///
2291
    /// The caller of this function should pay attention to the possibility of the
2292
    /// elements' drop function panicking, because this:
2293
    ///
2294
    ///    * May leave the table in an inconsistent state;
2295
    ///
2296
    ///    * May leak memory, since the table is then never deallocated.
2297
    ///
2298
    /// Attempt to use the `ctrl` field of the table (dereference) after calling this
2299
    /// function results in [`undefined behavior`].
2300
    ///
2301
    /// It is safe to call this function on a table that has not been allocated,
2302
    /// on a table with uninitialized control bytes, and on a table with no actual
2303
    /// data but with `Full` control bytes if `self.items == 0`.
2304
    ///
2305
    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2306
    /// for more information.
2307
    ///
2308
    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2309
    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2310
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2311
12.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2312
12.7k
        if !self.is_empty_singleton() {
2313
6.43k
            unsafe {
2314
6.43k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2315
6.43k
                self.drop_elements::<T>();
2316
6.43k
                // SAFETY:
2317
6.43k
                // 1. We have checked that our table is allocated.
2318
6.43k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2319
6.43k
                self.free_buckets(alloc, table_layout);
2320
6.43k
            }
2321
6.35k
        }
2322
12.7k
    }
<hashbrown::raw::RawTableInner>::drop_inner_table::<usize, hashbrown::raw::alloc::inner::Global>
Line
Count
Source
2311
12.7k
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2312
12.7k
        if !self.is_empty_singleton() {
2313
6.43k
            unsafe {
2314
6.43k
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2315
6.43k
                self.drop_elements::<T>();
2316
6.43k
                // SAFETY:
2317
6.43k
                // 1. We have checked that our table is allocated.
2318
6.43k
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2319
6.43k
                self.free_buckets(alloc, table_layout);
2320
6.43k
            }
2321
6.35k
        }
2322
12.7k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<_, _>
2323
2324
    /// Returns a pointer to an element in the table (convenience for
2325
    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2326
    ///
2327
    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2328
    /// otherwise using it may result in [`undefined behavior`].
2329
    ///
2330
    /// # Safety
2331
    ///
2332
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2333
    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2334
    /// this function, the following safety rules must be observed:
2335
    ///
2336
    /// * The table must already be allocated;
2337
    ///
2338
    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2339
    ///   function, i.e. `(index + 1) <= self.buckets()`.
2340
    ///
2341
    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2342
    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2343
    ///
2344
    /// It is safe to call this function with index of zero (`index == 0`) on a table that has
2345
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2346
    ///
2347
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2348
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
2349
    /// `(index + 1) <= self.buckets()`.
2350
    ///
2351
    /// ```none
2352
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2353
    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2354
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2355
    ///
2356
    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2357
    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2358
    ///                  |
2359
    ///                  |               `base = table.data_end::<T>()` points here
2360
    ///                  |               (to the start of CT0 or to the end of T0)
2361
    ///                  v                 v
2362
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2363
    ///                     ^                                              \__________  __________/
2364
    ///        `table.bucket(3)` returns a pointer that points                        \/
2365
    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2366
    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2367
    ///
2368
    /// where: T0...T_n  - our stored data;
2369
    ///        CT0...CT_n - control bytes or metadata for `data`;
2370
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2371
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2372
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2373
    ///
2374
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2375
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2376
    /// ```
2377
    ///
2378
    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2379
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2380
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2381
    #[inline]
2382
725k
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2383
725k
        debug_assert_ne!(self.bucket_mask, 0);
2384
725k
        debug_assert!(index < self.buckets());
2385
725k
        Bucket::from_base_index(self.data_end(), index)
2386
725k
    }
<hashbrown::raw::RawTableInner>::bucket::<usize>
Line
Count
Source
2382
725k
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2383
725k
        debug_assert_ne!(self.bucket_mask, 0);
2384
725k
        debug_assert!(index < self.buckets());
2385
725k
        Bucket::from_base_index(self.data_end(), index)
2386
725k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<_>
2387
2388
    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2389
    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2390
    ///
2391
    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2392
    /// otherwise using it may result in [`undefined behavior`].
2393
    ///
2394
    /// # Safety
2395
    ///
2396
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2397
    ///
2398
    /// * The table must already be allocated;
2399
    ///
2400
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2401
    ///   function, i.e. `(index + 1) <= self.buckets()`;
2402
    ///
2403
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2404
    ///
2405
    /// ```none
2406
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2407
    /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
2408
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2409
    ///
2410
    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2411
    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2412
    ///                  |
2413
    ///                  |               `base = table.data_end::<u8>()` points here
2414
    ///                  |               (to the start of CT0 or to the end of T0)
2415
    ///                  v                 v
2416
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2417
    ///                                                                    \__________  __________/
2418
    ///                                                                               \/
2419
    ///                                                                    additional control bytes
2420
    ///                                                                     `m = Group::WIDTH - 1`
2421
    ///
2422
    /// where: T0...T_n  - our stored data;
2423
    ///        CT0...CT_n - control bytes or metadata for `data`;
2424
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2425
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2426
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2427
    ///
2428
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2429
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2430
    /// ```
2431
    ///
2432
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2433
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2434
    #[inline]
2435
1.45M
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2436
1.45M
        debug_assert_ne!(self.bucket_mask, 0);
2437
1.45M
        debug_assert!(index < self.buckets());
2438
1.45M
        let base: *mut u8 = self.data_end().as_ptr();
2439
1.45M
        base.sub((index + 1) * size_of)
2440
1.45M
    }
<hashbrown::raw::RawTableInner>::bucket_ptr
Line
Count
Source
2435
1.45M
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2436
1.45M
        debug_assert_ne!(self.bucket_mask, 0);
2437
1.45M
        debug_assert!(index < self.buckets());
2438
1.45M
        let base: *mut u8 = self.data_end().as_ptr();
2439
1.45M
        base.sub((index + 1) * size_of)
2440
1.45M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
2441
2442
    /// Returns a pointer to one past the last `data` element in the table, as viewed from
2443
    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2444
    ///
2445
    /// This function actually returns a pointer to the end of the `data element` at
2446
    /// index "0" (zero).
2447
    ///
2448
    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2449
    /// otherwise using it may result in [`undefined behavior`].
2450
    ///
2451
    /// # Note
2452
    ///
2453
    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2454
    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2455
    ///
2456
    /// ```none
2457
    ///                        `table.data_end::<T>()` returns pointer that points here
2458
    ///                        (to the end of `T0`)
2459
    ///                          ∨
2460
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2461
    ///                           \________  ________/
2462
    ///                                    \/
2463
    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2464
    ///
2465
    /// where: T0...T_n  - our stored data;
2466
    ///        CT0...CT_n - control bytes or metadata for `data`.
2467
    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2468
    ///                        with loading `Group` bytes from the heap works properly, even if the result
2469
    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2470
    ///                        `RawTableInner::set_ctrl` function.
2471
    ///
2472
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2473
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2474
    /// ```
2475
    ///
2476
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2477
    #[inline]
2478
2.17M
    fn data_end<T>(&self) -> NonNull<T> {
2479
2.17M
        self.ctrl.cast()
2480
2.17M
    }
<hashbrown::raw::RawTableInner>::data_end::<u8>
Line
Count
Source
2478
1.45M
    fn data_end<T>(&self) -> NonNull<T> {
2479
1.45M
        self.ctrl.cast()
2480
1.45M
    }
<hashbrown::raw::RawTableInner>::data_end::<usize>
Line
Count
Source
2478
725k
    fn data_end<T>(&self) -> NonNull<T> {
2479
725k
        self.ctrl.cast()
2480
725k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<_>
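The three methods above all rely on the same end-relative layout: element `Ti` sits at `data_end - (i + 1) * size_of::<T>()`, with `T0` adjacent to the control bytes. A minimal standalone sketch of that arithmetic on a plain array (illustrative only; `data` and `end` are stand-ins, not hashbrown internals):

```rust
// Illustrative sketch of `data_end`-relative addressing. The array plays the
// role of the table's data part: T0 is stored last, right where the control
// bytes would begin, and higher indices grow downward from `end`.
fn main() {
    let data: [u64; 4] = [3, 2, 1, 0]; // laid out as T3, T2, T1, T0
    let end = data.as_ptr().wrapping_add(data.len()); // analogue of `data_end`

    for index in 0..data.len() {
        // Same formula as `bucket_ptr`: base.sub((index + 1) * size_of).
        let p = end.wrapping_sub(index + 1);
        // T0 is nearest to `end`; T3 is furthest from it.
        assert_eq!(unsafe { *p }, index as u64);
    }
    println!("end-relative indexing matches the layout diagrams");
}
```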
2481
2482
    /// Returns an iterator-like object for a probe sequence on the table.
2483
    ///
2484
    /// This iterator never terminates, but is guaranteed to visit each bucket
2485
    /// group exactly once. The loop using `probe_seq` must terminate upon
2486
    /// reaching a group containing an empty bucket.
2487
    #[inline]
2488
2.36M
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2489
2.36M
        ProbeSeq {
2490
2.36M
            // This is the same as `hash as usize % self.buckets()` because the number
2491
2.36M
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2492
2.36M
            pos: h1(hash) & self.bucket_mask,
2493
2.36M
            stride: 0,
2494
2.36M
        }
2495
2.36M
    }
<hashbrown::raw::RawTableInner>::probe_seq
Line
Count
Source
2488
2.36M
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2489
2.36M
        ProbeSeq {
2490
2.36M
            // This is the same as `hash as usize % self.buckets()` because the number
2491
2.36M
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2492
2.36M
            pos: h1(hash) & self.bucket_mask,
2493
2.36M
            stride: 0,
2494
2.36M
        }
2495
2.36M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
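As a sanity check of the comment above, a standalone snippet (hypothetical values, not hashbrown internals) confirming that masking with `bucket_mask` is the same reduction as taking the hash modulo a power-of-two bucket count:

```rust
fn main() {
    let buckets: usize = 16; // must be a power of two, as in the real table
    assert!(buckets.is_power_of_two());
    let bucket_mask = buckets - 1; // analogue of `self.bucket_mask`

    for hash in [0u64, 1, 15, 16, 17, u64::MAX] {
        // `h1` simply truncates the hash to usize; the mask then picks the
        // low bits, which is exactly the modulo for power-of-two sizes.
        let pos_mask = (hash as usize) & bucket_mask;
        let pos_mod = (hash as usize) % buckets;
        assert_eq!(pos_mask, pos_mod);
    }
    println!("mask and modulo agree for power-of-two bucket counts");
}
```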
2496
2497
    #[inline]
2498
478k
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, new_ctrl: Tag) {
2499
478k
        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2500
478k
        self.set_ctrl(index, new_ctrl);
2501
478k
        self.items += 1;
2502
478k
    }
<hashbrown::raw::RawTableInner>::record_item_insert_at
Line
Count
Source
2498
478k
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, new_ctrl: Tag) {
2499
478k
        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2500
478k
        self.set_ctrl(index, new_ctrl);
2501
478k
        self.items += 1;
2502
478k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
2503
2504
    #[inline]
2505
0
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2506
0
        let probe_seq_pos = self.probe_seq(hash).pos;
2507
0
        let probe_index =
2508
0
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
2509
0
        probe_index(i) == probe_index(new_i)
2510
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
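`is_in_same_group` is uncovered in this run, but its arithmetic is easy to check in isolation: the closure converts a bucket position into the index of the (possibly unaligned) probe group it belongs to, counted from the probe start. A small sketch with an assumed group width of 4 (the real `Group::WIDTH` depends on the SIMD backend):

```rust
// Sketch of `is_in_same_group`'s arithmetic with assumed parameters
// (GROUP_WIDTH = 4, 16 buckets); illustrative, not the real `Group::WIDTH`.
const GROUP_WIDTH: usize = 4;

fn probe_index(pos: usize, probe_start: usize, bucket_mask: usize) -> usize {
    // Distance from the probe start, wrapped to the table size, in groups.
    (pos.wrapping_sub(probe_start) & bucket_mask) / GROUP_WIDTH
}

fn main() {
    let bucket_mask = 15; // 16 buckets
    let probe_start = 14; // probe sequences may start mid-table
    // 14 and 1 both fall within the first (unaligned) group of this probe,
    // so rehashing would leave such an element where it is.
    assert_eq!(probe_index(14, probe_start, bucket_mask), 0);
    assert_eq!(probe_index(1, probe_start, bucket_mask), 0);
    // 2 falls into the second group, so an element there would actually move.
    assert_eq!(probe_index(2, probe_start, bucket_mask), 1);
}
```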
2511
2512
    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2513
    /// the end of the array.
2514
    ///
2515
    /// This function does not make any changes to the `data` parts of the table,
2516
    /// or any changes to the `items` or `growth_left` field of the table.
2517
    ///
2518
    /// # Safety
2519
    ///
2520
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2521
    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2522
    /// following rules when calling this function:
2523
    ///
2524
    /// * The [`RawTableInner`] has already been allocated;
2525
    ///
2526
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2527
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2528
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2529
    ///
2530
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2531
    ///
2532
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2533
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2534
    ///
2535
    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2536
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2537
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2538
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2539
    #[inline]
2540
725k
    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2541
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2542
725k
        self.set_ctrl(index, Tag::full(hash));
2543
725k
    }
<hashbrown::raw::RawTableInner>::set_ctrl_hash
Line
Count
Source
2540
725k
    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2541
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2542
725k
        self.set_ctrl(index, Tag::full(hash));
2543
725k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
2544
2545
    /// Replaces the hash in the control byte at the given index with the provided one,
2546
    /// and possibly also replicates the new control byte at the end of the array of control
2547
    /// bytes, returning the old control byte.
2548
    ///
2549
    /// This function does not make any changes to the `data` parts of the table,
2550
    /// or any changes to the `items` or `growth_left` field of the table.
2551
    ///
2552
    /// # Safety
2553
    ///
2554
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`]
2555
    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2556
    /// methods, you must observe the following rules when calling this function:
2557
    ///
2558
    /// * The [`RawTableInner`] has already been allocated;
2559
    ///
2560
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2561
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2562
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2563
    ///
2564
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2565
    ///
2566
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2567
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2568
    ///
2569
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
2570
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2571
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2572
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2573
    #[inline]
2574
0
    unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag {
2575
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`]
2576
0
        let prev_ctrl = *self.ctrl(index);
2577
0
        self.set_ctrl_hash(index, hash);
2578
0
        prev_ctrl
2579
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
2580
2581
    /// Sets a control byte, and possibly also the replicated control byte at
2582
    /// the end of the array.
2583
    ///
2584
    /// This function does not make any changes to the `data` parts of the table,
2585
    /// or any changes to the `items` or `growth_left` field of the table.
2586
    ///
2587
    /// # Safety
2588
    ///
2589
    /// You must observe the following safety rules when calling this function:
2590
    ///
2591
    /// * The [`RawTableInner`] has already been allocated;
2592
    ///
2593
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2594
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2595
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2596
    ///
2597
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2598
    ///
2599
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2600
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2601
    ///
2602
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2603
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2604
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2605
    #[inline]
2606
1.68M
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2607
        // Replicate the first Group::WIDTH control bytes at the end of
2608
        // the array without using a branch. If the table is smaller than
2609
        // the group width (self.buckets() < Group::WIDTH),
2610
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2611
        //
2612
        // - If index >= Group::WIDTH then index == index2.
2613
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2614
        //
2615
        // The very last replicated control byte is never actually read because
2616
        // we mask the initial index for unaligned loads, but we write it
2617
        // anyway because it makes the set_ctrl implementation simpler.
2618
        //
2619
        // If there are fewer buckets than Group::WIDTH then this code will
2620
        // replicate the buckets at the end of the trailing group. For example
2621
        // with 2 buckets and a group size of 4, the control bytes will look
2622
        // like this:
2623
        //
2624
        //     Real    |             Replicated
2625
        // ---------------------------------------------
2626
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2627
        // ---------------------------------------------
2628
2629
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2630
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2631
1.68M
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2632
2633
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2634
1.68M
        *self.ctrl(index) = ctrl;
2635
1.68M
        *self.ctrl(index2) = ctrl;
2636
1.68M
    }
<hashbrown::raw::RawTableInner>::set_ctrl
Line
Count
Source
2606
1.68M
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2607
        // Replicate the first Group::WIDTH control bytes at the end of
2608
        // the array without using a branch. If the table is smaller than
2609
        // the group width (self.buckets() < Group::WIDTH),
2610
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2611
        //
2612
        // - If index >= Group::WIDTH then index == index2.
2613
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2614
        //
2615
        // The very last replicated control byte is never actually read because
2616
        // we mask the initial index for unaligned loads, but we write it
2617
        // anyway because it makes the set_ctrl implementation simpler.
2618
        //
2619
        // If there are fewer buckets than Group::WIDTH then this code will
2620
        // replicate the buckets at the end of the trailing group. For example
2621
        // with 2 buckets and a group size of 4, the control bytes will look
2622
        // like this:
2623
        //
2624
        //     Real    |             Replicated
2625
        // ---------------------------------------------
2626
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2627
        // ---------------------------------------------
2628
2629
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2630
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2631
1.68M
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2632
2633
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2634
1.68M
        *self.ctrl(index) = ctrl;
2635
1.68M
        *self.ctrl(index2) = ctrl;
2636
1.68M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
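The mirrored-index arithmetic is worth checking by hand. A standalone sketch with an assumed group width of 4, reproducing both the large-table case and the 2-bucket diagram above (illustrative only; the real `Group::WIDTH` is backend-dependent):

```rust
// Sketch of `set_ctrl`'s mirror arithmetic with an assumed GROUP_WIDTH of 4.
const GROUP_WIDTH: usize = 4;

fn mirror(index: usize, bucket_mask: usize) -> usize {
    // Same formula as `index2` in `set_ctrl`.
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // 8 buckets: indices 0..=3 are replicated to 8..=11, the rest map to
    // themselves, so only one extra write happens per `set_ctrl` call.
    let bucket_mask = 7;
    assert_eq!(mirror(0, bucket_mask), 8);
    assert_eq!(mirror(3, bucket_mask), 11);
    assert_eq!(mirror(5, bucket_mask), 5);

    // 2 buckets (fewer than GROUP_WIDTH): every index lands in the trailing
    // group, exactly as in the [A] [B] [EMPTY] [EMPTY] [A] [B] diagram above.
    let bucket_mask = 1;
    assert_eq!(mirror(0, bucket_mask), 4);
    assert_eq!(mirror(1, bucket_mask), 5);
}
```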
2637
2638
    /// Returns a pointer to a control byte.
2639
    ///
2640
    /// # Safety
2641
    ///
2642
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2643
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2644
    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2645
    /// will return a pointer to one past the end of the allocated table, which is useless on its own.
2646
    ///
2647
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2648
    /// table that has not been allocated results in [`Undefined Behavior`].
2649
    ///
2650
    /// So, to satisfy both requirements, you should always follow the rule that
2651
    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2652
    ///
2653
    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2654
    /// for read-only purposes.
2655
    ///
2656
    /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
2657
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2658
    ///
2659
    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2660
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2661
    #[inline]
2662
9.75M
    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2663
9.75M
        debug_assert!(index < self.num_ctrl_bytes());
2664
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2665
9.75M
        self.ctrl.as_ptr().add(index).cast()
2666
9.75M
    }
<hashbrown::raw::RawTableInner>::ctrl
Line
Count
Source
2662
9.75M
    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2663
9.75M
        debug_assert!(index < self.num_ctrl_bytes());
2664
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2665
9.75M
        self.ctrl.as_ptr().add(index).cast()
2666
9.75M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
2667
2668
    /// Gets the slice of all control bytes.
2669
27.0k
    fn ctrl_slice(&mut self) -> &mut [Tag] {
2670
        // SAFETY: We've initialized all control bytes, and have the correct number.
2671
27.0k
        unsafe { slice::from_raw_parts_mut(self.ctrl.as_ptr().cast(), self.num_ctrl_bytes()) }
2672
27.0k
    }
2673
2674
    #[inline]
2675
27.0k
    fn buckets(&self) -> usize {
2676
27.0k
        self.bucket_mask + 1
2677
27.0k
    }
<hashbrown::raw::RawTableInner>::buckets
Line
Count
Source
2675
27.0k
    fn buckets(&self) -> usize {
2676
27.0k
        self.bucket_mask + 1
2677
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
2678
2679
    /// Checks whether the bucket at `index` is full.
2680
    ///
2681
    /// # Safety
2682
    ///
2683
    /// The caller must ensure `index` is less than the number of buckets.
2684
    #[inline]
2685
1.20M
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2686
1.20M
        debug_assert!(index < self.buckets());
2687
1.20M
        (*self.ctrl(index)).is_full()
2688
1.20M
    }
<hashbrown::raw::RawTableInner>::is_bucket_full
Line
Count
Source
2685
1.20M
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2686
1.20M
        debug_assert!(index < self.buckets());
2687
1.20M
        (*self.ctrl(index)).is_full()
2688
1.20M
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
2689
2690
    #[inline]
2691
27.0k
    fn num_ctrl_bytes(&self) -> usize {
2692
27.0k
        self.bucket_mask + 1 + Group::WIDTH
2693
27.0k
    }
2694
2695
    #[inline]
2696
39.8k
    fn is_empty_singleton(&self) -> bool {
2697
39.8k
        self.bucket_mask == 0
2698
39.8k
    }
<hashbrown::raw::RawTableInner>::is_empty_singleton
Line
Count
Source
2696
39.8k
    fn is_empty_singleton(&self) -> bool {
2697
39.8k
        self.bucket_mask == 0
2698
39.8k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
2699
2700
    /// Attempts to allocate a new hash table with at least enough capacity
2701
    /// for inserting the given number of elements without reallocating,
2702
    /// and return it inside `ScopeGuard` to protect against panic in the hash
2703
    /// function.
2704
    ///
2705
    /// # Note
2706
    ///
2707
    /// It is recommended (but not required):
2708
    ///
2709
    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2710
    ///
2711
    /// * That `alloc` be the same [`Allocator`] as the `Allocator` used
2712
    ///   to allocate this table.
2713
    ///
2714
    /// * That `table_layout` be the same [`TableLayout`] as the `TableLayout` used
2715
    ///   to allocate this table.
2716
    ///
2717
    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2718
    /// this table, then using `mem::swap` with the `self` and the new table returned
2719
    /// by this function results in [`undefined behavior`].
2720
    ///
2721
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2722
    #[allow(clippy::mut_mut)]
2723
    #[inline]
2724
27.0k
    fn prepare_resize<'a, A>(
2725
27.0k
        &self,
2726
27.0k
        alloc: &'a A,
2727
27.0k
        table_layout: TableLayout,
2728
27.0k
        capacity: usize,
2729
27.0k
        fallibility: Fallibility,
2730
27.0k
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2731
27.0k
    where
2732
27.0k
        A: Allocator,
2733
    {
2734
27.0k
        debug_assert!(self.items <= capacity);
2735
2736
        // Allocate and initialize the new table.
2737
27.0k
        let new_table =
2738
27.0k
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2739
2740
        // The hash function may panic, in which case we simply free the new
2741
        // table without dropping any elements that may have been copied into
2742
        // it.
2743
        //
2744
        // This guard is also used to free the old table on success, see
2745
        // the comment at the bottom of this function.
2746
27.0k
        Ok(guard(new_table, move |self_| {
2747
27.0k
            if !self_.is_empty_singleton() {
2748
20.5k
                // SAFETY:
2749
20.5k
                // 1. We have checked that our table is allocated.
2750
20.5k
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2751
20.5k
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2752
20.5k
                unsafe { self_.free_buckets(alloc, table_layout) };
2753
20.5k
            }
2754
27.0k
        }))
<hashbrown::raw::RawTableInner>::prepare_resize::<hashbrown::raw::alloc::inner::Global>::{closure#0}
Line
Count
Source
2746
27.0k
        Ok(guard(new_table, move |self_| {
2747
27.0k
            if !self_.is_empty_singleton() {
2748
20.5k
                // SAFETY:
2749
20.5k
                // 1. We have checked that our table is allocated.
2750
20.5k
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2751
20.5k
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2752
20.5k
                unsafe { self_.free_buckets(alloc, table_layout) };
2753
20.5k
            }
2754
27.0k
        }))
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>::{closure#0}
2755
27.0k
    }
<hashbrown::raw::RawTableInner>::prepare_resize::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
2724
27.0k
    fn prepare_resize<'a, A>(
2725
27.0k
        &self,
2726
27.0k
        alloc: &'a A,
2727
27.0k
        table_layout: TableLayout,
2728
27.0k
        capacity: usize,
2729
27.0k
        fallibility: Fallibility,
2730
27.0k
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2731
27.0k
    where
2732
27.0k
        A: Allocator,
2733
    {
2734
27.0k
        debug_assert!(self.items <= capacity);
2735
2736
        // Allocate and initialize the new table.
2737
27.0k
        let new_table =
2738
27.0k
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2739
2740
        // The hash function may panic, in which case we simply free the new
2741
        // table without dropping any elements that may have been copied into
2742
        // it.
2743
        //
2744
        // This guard is also used to free the old table on success, see
2745
        // the comment at the bottom of this function.
2746
27.0k
        Ok(guard(new_table, move |self_| {
2747
            if !self_.is_empty_singleton() {
2748
                // SAFETY:
2749
                // 1. We have checked that our table is allocated.
2750
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2751
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2752
                unsafe { self_.free_buckets(alloc, table_layout) };
2753
            }
2754
        }))
2755
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>
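`prepare_resize` leans on the `scopeguard` module for panic safety: if the hash function panics mid-copy, the guard's closure frees the new allocation, and on success the same guard (after `mem::swap` in `resize_inner`) ends up freeing the old table. A minimal drop-guard sketch of the pattern (not hashbrown's actual `guard` API):

```rust
// Minimal drop-guard sketch of the pattern `prepare_resize` relies on: the
// cleanup closure runs when the guard is dropped, whether the drop happens
// normally or during unwinding from a panicking hash function.
struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    cleanup: F,
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        (self.cleanup)(&mut self.value);
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _guard = Guard {
            value: vec![0u8; 16], // stands in for the freshly allocated table
            cleanup: |v: &mut Vec<u8>| println!("freeing {} bytes", v.len()),
        };
        panic!("hash function panicked"); // cleanup still runs while unwinding
    });
    assert!(result.is_err());
}
```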
2756
2757
    /// Reserves or rehashes to make room for `additional` more elements.
2758
    ///
2759
    /// This uses dynamic dispatch to reduce the amount of
2760
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2761
    ///
2762
    /// # Safety
2763
    ///
2764
    /// If any of the following conditions are violated, the result is
2765
    /// [`undefined behavior`]:
2766
    ///
2767
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2768
    ///   to allocate this table.
2769
    ///
2770
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2771
    ///   used to allocate this table.
2772
    ///
2773
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2774
    ///   the elements stored in the table.
2775
    ///
2776
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2777
    ///
2778
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2779
    #[allow(clippy::inline_always)]
2780
    #[inline(always)]
2781
27.0k
    unsafe fn reserve_rehash_inner<A>(
2782
27.0k
        &mut self,
2783
27.0k
        alloc: &A,
2784
27.0k
        additional: usize,
2785
27.0k
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2786
27.0k
        fallibility: Fallibility,
2787
27.0k
        layout: TableLayout,
2788
27.0k
        drop: Option<unsafe fn(*mut u8)>,
2789
27.0k
    ) -> Result<(), TryReserveError>
2790
27.0k
    where
2791
27.0k
        A: Allocator,
2792
    {
2793
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2794
27.0k
        let new_items = match self.items.checked_add(additional) {
2795
27.0k
            Some(new_items) => new_items,
2796
0
            None => return Err(fallibility.capacity_overflow()),
2797
        };
2798
27.0k
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2799
27.0k
        if new_items <= full_capacity / 2 {
2800
            // Rehash in-place without re-allocating if we have plenty of spare
2801
            // capacity that is locked up due to DELETED entries.
2802
2803
            // SAFETY:
2804
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2805
            //    (since new_items <= full_capacity / 2);
2806
            // 2. The caller ensures that the `drop` function is the actual drop function of
2807
            //    the elements stored in the table.
2808
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2809
            //    used to allocate this table.
2810
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2811
            //    are already initialized.
2812
0
            self.rehash_in_place(hasher, layout.size, drop);
2813
0
            Ok(())
2814
        } else {
2815
            // Otherwise, conservatively resize to at least the next size up
2816
            // to avoid churning deletes into frequent rehashes.
2817
            //
2818
            // SAFETY:
2819
            // 1. We know for sure that `capacity >= self.items`.
2820
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2821
            //    [`TableLayout`] that were used to allocate this table.
2822
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2823
            //    are already initialized.
2824
27.0k
            self.resize_inner(
2825
27.0k
                alloc,
2826
27.0k
                usize::max(new_items, full_capacity + 1),
2827
27.0k
                hasher,
2828
27.0k
                fallibility,
2829
27.0k
                layout,
2830
            )
2831
        }
2832
27.0k
    }
<hashbrown::raw::RawTableInner>::reserve_rehash_inner::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
2781
27.0k
    unsafe fn reserve_rehash_inner<A>(
2782
27.0k
        &mut self,
2783
27.0k
        alloc: &A,
2784
27.0k
        additional: usize,
2785
27.0k
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2786
27.0k
        fallibility: Fallibility,
2787
27.0k
        layout: TableLayout,
2788
27.0k
        drop: Option<unsafe fn(*mut u8)>,
2789
27.0k
    ) -> Result<(), TryReserveError>
2790
27.0k
    where
2791
27.0k
        A: Allocator,
2792
    {
2793
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2794
27.0k
        let new_items = match self.items.checked_add(additional) {
2795
27.0k
            Some(new_items) => new_items,
2796
0
            None => return Err(fallibility.capacity_overflow()),
2797
        };
2798
27.0k
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2799
27.0k
        if new_items <= full_capacity / 2 {
2800
            // Rehash in-place without re-allocating if we have plenty of spare
2801
            // capacity that is locked up due to DELETED entries.
2802
2803
            // SAFETY:
2804
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2805
            //    (since new_items <= full_capacity / 2);
2806
            // 2. The caller ensures that the `drop` function is the actual drop function of
2807
            //    the elements stored in the table.
2808
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2809
            //    used to allocate this table.
2810
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2811
            //    are already initialized.
2812
0
            self.rehash_in_place(hasher, layout.size, drop);
2813
0
            Ok(())
2814
        } else {
2815
            // Otherwise, conservatively resize to at least the next size up
2816
            // to avoid churning deletes into frequent rehashes.
2817
            //
2818
            // SAFETY:
2819
            // 1. We know for sure that `capacity >= self.items`.
2820
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2821
            //    [`TableLayout`] that were used to allocate this table.
2822
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2823
            //    are already initialized.
2824
27.0k
            self.resize_inner(
2825
27.0k
                alloc,
2826
27.0k
                usize::max(new_items, full_capacity + 1),
2827
27.0k
                hasher,
2828
27.0k
                fallibility,
2829
27.0k
                layout,
2830
            )
2831
        }
2832
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<_>
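The branch above is self-contained enough to model directly: rehash in place when at least half the usable capacity is tied up in DELETED slots, otherwise grow to at least the next size up. A hedged sketch of that decision, with a simplified stand-in for `bucket_mask_to_capacity` (the real function special-cases tables smaller than a group):

```rust
// Sketch of `reserve_rehash_inner`'s branch. `capacity_of` mirrors
// `bucket_mask_to_capacity` only for tables larger than a group
// (usable capacity = 7/8 of the buckets); small-table cases differ.
fn capacity_of(bucket_mask: usize) -> usize {
    ((bucket_mask + 1) / 8) * 7
}

enum Plan {
    RehashInPlace,
    ResizeTo(usize), // number of items the new table must hold
}

fn plan(items: usize, additional: usize, bucket_mask: usize) -> Plan {
    let new_items = items.checked_add(additional).expect("capacity overflow");
    let full_capacity = capacity_of(bucket_mask);
    if new_items <= full_capacity / 2 {
        // Plenty of capacity is locked up in DELETED entries: reclaim it.
        Plan::RehashInPlace
    } else {
        // Grow to at least the next size up to avoid frequent rehashes.
        Plan::ResizeTo(usize::max(new_items, full_capacity + 1))
    }
}

fn main() {
    // 128 buckets => usable capacity 112. Only 10 live items, 1 more wanted:
    // 11 <= 56, so deleted slots are reclaimed without reallocating.
    assert!(matches!(plan(10, 1, 127), Plan::RehashInPlace));
    // 100 live items, 20 more wanted: 120 > 56, so resize to hold >= 120.
    assert!(matches!(plan(100, 20, 127), Plan::ResizeTo(120)));
}
```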
2833
2834
    /// Returns an iterator over full buckets indices in the table.
2835
    ///
2836
    /// # Safety
2837
    ///
2838
    /// Behavior is undefined if any of the following conditions are violated:
2839
    ///
2840
    /// * The caller has to ensure that the `RawTableInner` outlives the
2841
    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2842
    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2843
    ///   `full_buckets_indices` method unsafe.
2844
    ///
2845
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2846
    #[inline(always)]
2847
27.0k
    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2848
        // SAFETY:
2849
        // 1. The caller of this function ensures that the control bytes
2850
        //    are properly initialized and `self.ctrl(0)` points to the start
2851
        //    of the array of control bytes; therefore `ctrl` is valid for reads,
2852
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2853
        //    control bytes.
2854
        // 2. The value of `items` is equal to the amount of data (values) added
2855
        //    to the table.
2856
        //
2857
        //                         `ctrl` points here (to the start
2858
        //                         of the first control byte `CT0`)
2859
        //                          ∨
2860
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2861
        //                           \________  ________/
2862
        //                                    \/
2863
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2864
        //
2865
        // where: T0...T_n  - our stored data;
2866
        //        CT0...CT_n - control bytes or metadata for `data`.
2867
27.0k
        let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::<u8>());
2868
2869
27.0k
        FullBucketsIndices {
2870
27.0k
            // Load the first group
2871
27.0k
            // SAFETY: See explanation above.
2872
27.0k
            current_group: Group::load_aligned(ctrl.as_ptr().cast())
2873
27.0k
                .match_full()
2874
27.0k
                .into_iter(),
2875
27.0k
            group_first_index: 0,
2876
27.0k
            ctrl,
2877
27.0k
            items: self.items,
2878
27.0k
        }
2879
27.0k
    }
2880
2881
    /// Allocates a new table of a different size and moves the contents of the
2882
    /// current table into it.
2883
    ///
2884
    /// This uses dynamic dispatch to reduce the amount of
2885
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2886
    ///
2887
    /// # Safety
2888
    ///
2889
    /// If any of the following conditions are violated, the result is
2890
    /// [`undefined behavior`]:
2891
    ///
2892
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2893
    ///   to allocate this table;
2894
    ///
2895
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2896
    ///   used to allocate this table;
2897
    ///
2898
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2899
    ///
2900
    /// The caller of this function must ensure that `capacity >= self.items`;
2901
    /// otherwise:
2902
    ///
2903
    /// * If `self.items != 0`, calling this function with `capacity == 0`
2904
    ///   results in [`undefined behavior`].
2905
    ///
2906
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2907
    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
2908
    ///   results in [`undefined behavior`].
2909
    ///
2910
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2911
    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
2912
    ///   will never return (it will go into an infinite loop).
2913
    ///
2914
    /// Note: It is recommended (but not required) that the new table's `capacity`
2915
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2916
    /// this function may never return. See [`RawTableInner::find_insert_index`] for
2917
    /// more information.
2918
    ///
2919
    /// [`RawTableInner::find_insert_index`]: RawTableInner::find_insert_index
2920
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2921
    #[allow(clippy::inline_always)]
2922
    #[inline(always)]
2923
27.0k
    unsafe fn resize_inner<A>(
2924
27.0k
        &mut self,
2925
27.0k
        alloc: &A,
2926
27.0k
        capacity: usize,
2927
27.0k
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2928
27.0k
        fallibility: Fallibility,
2929
27.0k
        layout: TableLayout,
2930
27.0k
    ) -> Result<(), TryReserveError>
2931
27.0k
    where
2932
27.0k
        A: Allocator,
2933
    {
2934
        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2935
        // that were used to allocate this table.
2936
27.0k
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2937
2938
        // SAFETY: We know for sure that RawTableInner will outlive the
2939
        // returned `FullBucketsIndices` iterator, and the caller of this
2940
        // function ensures that the control bytes are properly initialized.
2941
725k
        for full_byte_index in self.full_buckets_indices() {
2942
725k
            // This may panic.
2943
725k
            let hash = hasher(self, full_byte_index);
2944
725k
2945
725k
            // SAFETY:
2946
725k
            // We can use a simpler version of insert() here since:
2947
725k
            // 1. There are no DELETED entries.
2948
725k
            // 2. We know there is enough space in the table.
2949
725k
            // 3. All elements are unique.
2950
725k
            // 4. The caller of this function guarantees that `capacity > 0`
2951
725k
            //    so `new_table` must already have some allocated memory.
2952
725k
            // 5. We set `growth_left` and `items` fields of the new table
2953
725k
            //    after the loop.
2954
725k
            // 6. We insert into the table, at the returned index, the data
2955
725k
            //    matching the given hash immediately after calling this function.
2956
725k
            let (new_index, _) = new_table.prepare_insert_index(hash);
2957
725k
2958
725k
            // SAFETY:
2959
725k
            //
2960
725k
            // * `src` is valid for reads of `layout.size` bytes, since the
2961
725k
            //   table is alive and the `full_byte_index` is guaranteed to be
2962
725k
            //   within bounds (see `FullBucketsIndices::next_impl`);
2963
725k
            //
2964
725k
            // * `dst` is valid for writes of `layout.size` bytes, since the
2965
725k
            //   caller ensures that `table_layout` matches the [`TableLayout`]
2966
725k
            //   that was used to allocate the old table and we have the `new_index`
2967
725k
            //   returned by `prepare_insert_index`.
2968
725k
            //
2969
725k
            // * Both `src` and `dst` are properly aligned.
2970
725k
            //
2971
725k
            // * Both `src` and `dst` point to different regions of memory.
2972
725k
            ptr::copy_nonoverlapping(
2973
725k
                self.bucket_ptr(full_byte_index, layout.size),
2974
725k
                new_table.bucket_ptr(new_index, layout.size),
2975
725k
                layout.size,
2976
725k
            );
2977
725k
        }
2978
2979
        // The hash function didn't panic, so we can safely set the
2980
        // `growth_left` and `items` fields of the new table.
2981
27.0k
        new_table.growth_left -= self.items;
2982
27.0k
        new_table.items = self.items;
2983
2984
        // We successfully copied all elements without panicking. Now replace
2985
        // self with the new table. The old table will have its memory freed but
2986
        // the items will not be dropped (since they have been moved into the
2987
        // new table).
2988
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2989
        // that was used to allocate this table.
2990
27.0k
        mem::swap(self, &mut new_table);
2991
2992
27.0k
        Ok(())
2993
27.0k
    }
<hashbrown::raw::RawTableInner>::resize_inner::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
2923
27.0k
    unsafe fn resize_inner<A>(
2924
27.0k
        &mut self,
2925
27.0k
        alloc: &A,
2926
27.0k
        capacity: usize,
2927
27.0k
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2928
27.0k
        fallibility: Fallibility,
2929
27.0k
        layout: TableLayout,
2930
27.0k
    ) -> Result<(), TryReserveError>
2931
27.0k
    where
2932
27.0k
        A: Allocator,
2933
    {
2934
        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2935
        // that were used to allocate this table.
2936
27.0k
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2937
2938
        // SAFETY: We know for sure that RawTableInner will outlive the
2939
        // returned `FullBucketsIndices` iterator, and the caller of this
2940
        // function ensures that the control bytes are properly initialized.
2941
725k
        for full_byte_index in self.full_buckets_indices() {
2942
725k
            // This may panic.
2943
725k
            let hash = hasher(self, full_byte_index);
2944
725k
2945
725k
            // SAFETY:
2946
725k
            // We can use a simpler version of insert() here since:
2947
725k
            // 1. There are no DELETED entries.
2948
725k
            // 2. We know there is enough space in the table.
2949
725k
            // 3. All elements are unique.
2950
725k
            // 4. The caller of this function guarantees that `capacity > 0`
2951
725k
            //    so `new_table` must already have some allocated memory.
2952
725k
            // 5. We set `growth_left` and `items` fields of the new table
2953
725k
            //    after the loop.
2954
725k
            // 6. We insert into the table, at the returned index, the data
2955
725k
            //    matching the given hash immediately after calling this function.
2956
725k
            let (new_index, _) = new_table.prepare_insert_index(hash);
2957
725k
2958
725k
            // SAFETY:
2959
725k
            //
2960
725k
            // * `src` is valid for reads of `layout.size` bytes, since the
2961
725k
            //   table is alive and the `full_byte_index` is guaranteed to be
2962
725k
            //   within bounds (see `FullBucketsIndices::next_impl`);
2963
725k
            //
2964
725k
            // * `dst` is valid for writes of `layout.size` bytes, since the
2965
725k
            //   caller ensures that `table_layout` matches the [`TableLayout`]
2966
725k
            //   that was used to allocate the old table and we have the `new_index`
2967
725k
            //   returned by `prepare_insert_index`.
2968
725k
            //
2969
725k
            // * Both `src` and `dst` are properly aligned.
2970
725k
            //
2971
725k
            // * Both `src` and `dst` point to different regions of memory.
2972
725k
            ptr::copy_nonoverlapping(
2973
725k
                self.bucket_ptr(full_byte_index, layout.size),
2974
725k
                new_table.bucket_ptr(new_index, layout.size),
2975
725k
                layout.size,
2976
725k
            );
2977
725k
        }
2978
2979
        // The hash function didn't panic, so we can safely set the
2980
        // `growth_left` and `items` fields of the new table.
2981
27.0k
        new_table.growth_left -= self.items;
2982
27.0k
        new_table.items = self.items;
2983
2984
        // We successfully copied all elements without panicking. Now replace
2985
        // self with the new table. The old table will have its memory freed but
2986
        // the items will not be dropped (since they have been moved into the
2987
        // new table).
2988
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2989
        // that was used to allocate this table.
2990
27.0k
        mem::swap(self, &mut new_table);
2991
2992
27.0k
        Ok(())
2993
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<_>
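Stripped of the raw pointers, `resize_inner` is "walk the full buckets, rehash each element, copy it into its reserved slot in the new table, then swap tables". A safe toy analogue, using linear probing in place of group-wise probing and letting keys double as their own hashes (illustrative only):

```rust
// Safe, simplified analogue of `resize_inner`: walk the full slots of the
// old table, recompute each key's position for the new size, and move the
// entry. Toy linear probing stands in for hashbrown's group-wise probing.
type Slot = Option<(u64, &'static str)>;

fn resize(old: Vec<Slot>, new_len: usize) -> Vec<Slot> {
    assert!(new_len.is_power_of_two());
    let mut new = vec![None; new_len];
    for entry in old.into_iter().flatten() {
        // `entry.0` plays the role of the hash returned by `hasher(self, i)`.
        let mut pos = (entry.0 as usize) & (new_len - 1);
        while new[pos].is_some() {
            pos = (pos + 1) & (new_len - 1); // `prepare_insert_index` analogue
        }
        new[pos] = Some(entry); // the `ptr::copy_nonoverlapping` step
    }
    new
}

fn main() {
    let mut old: Vec<Slot> = vec![None; 4];
    old[1] = Some((1, "a"));
    old[3] = Some((7, "b"));
    let new = resize(old, 8);
    assert_eq!(new[1], Some((1, "a")));
    assert_eq!(new[7], Some((7, "b")));
}
```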
2994
2995
    /// Rehashes the contents of the table in place (i.e. without changing the
2996
    /// allocation).
2997
    ///
2998
    /// If `hasher` panics, then some of the table's contents may be lost.
2999
    ///
3000
    /// This uses dynamic dispatch to reduce the amount of
3001
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
3002
    ///
3003
    /// # Safety
3004
    ///
3005
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
3006
    ///
3007
    /// * The `size_of` must be equal to the size of the elements stored in the table;
3008
    ///
3009
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
3010
    ///   the elements stored in the table.
3011
    ///
3012
    /// * The [`RawTableInner`] has already been allocated;
3013
    ///
3014
    /// * The [`RawTableInner`] must have properly initialized control bytes.
3015
    ///
3016
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3017
    #[allow(clippy::inline_always)]
3018
    #[cfg_attr(feature = "inline-more", inline(always))]
3019
    #[cfg_attr(not(feature = "inline-more"), inline)]
3020
0
    unsafe fn rehash_in_place(
3021
0
        &mut self,
3022
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
3023
0
        size_of: usize,
3024
0
        drop: Option<unsafe fn(*mut u8)>,
3025
0
    ) {
3026
        // If the hash function panics then properly clean up any elements
3027
        // that we haven't rehashed yet. We unfortunately can't preserve the
3028
        // elements since we've lost their hashes and have no way of recovering them
3029
        // without risking another panic.
3030
0
        self.prepare_rehash_in_place();
3031
3032
0
        let mut guard = guard(self, move |self_| {
3033
0
            if let Some(drop) = drop {
3034
0
                for i in 0..self_.buckets() {
3035
0
                    if *self_.ctrl(i) == Tag::DELETED {
3036
0
                        self_.set_ctrl(i, Tag::EMPTY);
3037
0
                        drop(self_.bucket_ptr(i, size_of));
3038
0
                        self_.items -= 1;
3039
0
                    }
3040
                }
3041
0
            }
3042
0
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
3043
0
        });
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
3044
3045
        // At this point, DELETED elements are elements that we haven't
3046
        // rehashed yet. Find them and re-insert them at their ideal
3047
        // position.
3048
0
        'outer: for i in 0..guard.buckets() {
3049
0
            if *guard.ctrl(i) != Tag::DELETED {
3050
0
                continue;
3051
0
            }
3052
3053
0
            let i_p = guard.bucket_ptr(i, size_of);
3054
3055
            'inner: loop {
3056
                // Hash the current item
3057
0
                let hash = hasher(*guard, i);
3058
3059
                // Search for a suitable place to put it
3060
                //
3061
                // SAFETY: Caller of this function ensures that the control bytes
3062
                // are properly initialized.
3063
0
                let new_i = guard.find_insert_index(hash);
3064
3065
                // Probing works by scanning through all of the control
3066
                // bytes in groups, which may not be aligned to the group
3067
                // size. If both the new and old position fall within the
3068
                // same unaligned group, then there is no benefit in moving
3069
                // it and we can just continue to the next item.
3070
0
                if likely(guard.is_in_same_group(i, new_i, hash)) {
3071
0
                    guard.set_ctrl_hash(i, hash);
3072
0
                    continue 'outer;
3073
0
                }
3074
3075
0
                let new_i_p = guard.bucket_ptr(new_i, size_of);
3076
3077
                // We are moving the current item to a new position. Write
3078
                // our H2 to the control byte of the new position.
3079
0
                let prev_ctrl = guard.replace_ctrl_hash(new_i, hash);
3080
0
                if prev_ctrl == Tag::EMPTY {
3081
0
                    guard.set_ctrl(i, Tag::EMPTY);
3082
                    // If the target slot is empty, simply move the current
3083
                    // element into the new slot and clear the old control
3084
                    // byte.
3085
0
                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
3086
0
                    continue 'outer;
3087
                } else {
3088
                    // If the target slot is occupied, swap the two elements
3089
                    // and then continue processing the element that we just
3090
                    // swapped into the old slot.
3091
0
                    debug_assert_eq!(prev_ctrl, Tag::DELETED);
3092
0
                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
3093
0
                    continue 'inner;
3094
                }
3095
            }
3096
        }
3097
3098
0
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
3099
3100
0
        mem::forget(guard);
3101
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place
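The control-byte dance above (DELETED marks the not-yet-rehashed entries; moves target EMPTY slots, swaps target DELETED ones) can be modeled safely. A toy in-place rehash, with linear probing standing in for group probing and `NeedsRehash` playing the role of `Tag::DELETED` (a sketch, not hashbrown's implementation):

```rust
// Toy in-place rehash mirroring the DELETED-marking scheme: every live
// entry is first marked NeedsRehash, then moved to its ideal slot,
// displacing other not-yet-rehashed entries as needed.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Slot {
    Empty,
    NeedsRehash(u64), // hashbrown's DELETED: live data, not yet re-placed
    Full(u64),
}

fn rehash_in_place(table: &mut [Slot]) {
    let mask = table.len() - 1; // table length must be a power of two
    // Phase 1 (`prepare_rehash_in_place` analogue): mark every live entry.
    for s in table.iter_mut() {
        if let Slot::Full(k) = *s {
            *s = Slot::NeedsRehash(k);
        }
    }
    // Phase 2: re-place each marked entry at (or near) its ideal position.
    for i in 0..table.len() {
        while let Slot::NeedsRehash(k) = table[i] {
            // Probe from the ideal slot to the first not-yet-final slot.
            let mut new_i = (k as usize) & mask;
            while matches!(table[new_i], Slot::Full(_)) {
                new_i = (new_i + 1) & mask;
            }
            if new_i == i {
                table[i] = Slot::Full(k); // already acceptably placed
            } else if let Slot::NeedsRehash(other) = table[new_i] {
                table[new_i] = Slot::Full(k); // the DELETED swap case:
                table[i] = Slot::NeedsRehash(other); // re-place the displaced entry next
            } else {
                table[new_i] = Slot::Full(k); // the EMPTY case: plain move
                table[i] = Slot::Empty;
            }
        }
    }
}

fn main() {
    let mut t = [Slot::Empty; 8];
    t[5] = Slot::Full(1); // key 1 ideally lives in slot 1
    t[2] = Slot::Full(2); // key 2 is already in its ideal slot
    rehash_in_place(&mut t);
    assert_eq!(t[1], Slot::Full(1));
    assert_eq!(t[2], Slot::Full(2));
    assert_eq!(t[5], Slot::Empty);
}
```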
3102
3103
    /// Deallocates the table without dropping any entries.
3104
    ///
3105
    /// # Note
3106
    ///
3107
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
3108
    /// otherwise it can leak memory. Calling this function also invalidates (makes
3109
    /// dangling) all bucket instances ([`Bucket`]) as well as the `ctrl` field of
3110
    /// the table.
3111
    ///
3112
    /// # Safety
3113
    ///
3114
    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
3115
    ///
3116
    /// * The [`RawTableInner`] has already been allocated;
3117
    ///
3118
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
3119
    ///   to allocate this table.
3120
    ///
3121
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
3122
    ///   to allocate this table.
3123
    ///
3124
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3125
    ///
3126
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3127
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3128
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3129
    #[inline]
3130
27.0k
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
3131
27.0k
    where
3132
27.0k
        A: Allocator,
3133
    {
3134
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
3135
        // method.
3136
27.0k
        let (ptr, layout) = self.allocation_info(table_layout);
3137
27.0k
        alloc.deallocate(ptr, layout);
3138
27.0k
    }
<hashbrown::raw::RawTableInner>::free_buckets::<hashbrown::raw::alloc::inner::Global>
Line
Count
Source
3130
27.0k
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
3131
27.0k
    where
3132
27.0k
        A: Allocator,
3133
    {
3134
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
3135
        // method.
3136
27.0k
        let (ptr, layout) = self.allocation_info(table_layout);
3137
27.0k
        alloc.deallocate(ptr, layout);
3138
27.0k
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<_>
3139
3140
    /// Returns a pointer to the allocated memory and the layout that was used to
3141
    /// allocate the table.
3142
    ///
3143
    /// # Safety
3144
    ///
3145
    /// Caller of this function must observe the following safety rules:
3146
    ///
3147
    /// * The [`RawTableInner`] has already been allocated, otherwise
3148
    ///   calling this function results in [`undefined behavior`]
3149
    ///
3150
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3151
    ///   that was used to allocate this table. Failure to comply with this condition
3152
    ///   may result in [`undefined behavior`].
3153
    ///
3154
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3155
    ///
3156
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3157
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3158
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3159
    #[inline]
3160
27.0k
    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3161
27.0k
        debug_assert!(
3162
0
            !self.is_empty_singleton(),
3163
0
            "this function can only be called on non-empty tables"
3164
        );
3165
3166
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3167
27.0k
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3168
27.0k
            Some(lco) => lco,
3169
0
            None => unsafe { hint::unreachable_unchecked() },
3170
        };
3171
27.0k
        (
3172
27.0k
            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3173
27.0k
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3174
27.0k
            layout,
3175
27.0k
        )
3176
27.0k
    }
<hashbrown::raw::RawTableInner>::allocation_info
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
3177
3178
    /// Returns the total amount of memory allocated internally by the hash
3179
    /// table, in bytes.
3180
    ///
3181
    /// The returned number is informational only. It is intended to be
3182
    /// primarily used for memory profiling.
3183
    ///
3184
    /// # Safety
3185
    ///
3186
    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3187
    /// that was used to allocate this table. Failure to comply with this condition
3188
    /// may result in [`undefined behavior`].
3189
    ///
3190
    ///
3191
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3192
    #[inline]
3193
0
    unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize {
3194
0
        if self.is_empty_singleton() {
3195
0
            0
3196
        } else {
3197
            // SAFETY:
3198
            // 1. We have checked that our table is allocated.
3199
            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3200
            // that was used to allocate this table.
3201
0
            unsafe { self.allocation_info(table_layout).1.size() }
3202
        }
3203
0
    }
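For orientation, a minimal sketch of how this value surfaces through the public API. It assumes the `allocation_size` accessor on the `HashMap` wrapper (present in recent hashbrown releases) ultimately reports the quantity computed here:

use hashbrown::HashMap;

fn main() {
    // The unallocated empty singleton reports zero bytes.
    let empty: HashMap<u64, u64> = HashMap::new();
    assert_eq!(empty.allocation_size(), 0);

    // Once allocated, the size covers the control bytes plus the bucket array.
    let mut map = HashMap::new();
    map.insert(1u64, 2u64);
    assert!(map.allocation_size() > 0);
}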
3204
3205
    /// Marks all table buckets as empty without dropping their contents.
3206
    #[inline]
3207
0
    fn clear_no_drop(&mut self) {
3208
0
        if !self.is_empty_singleton() {
3209
0
            self.ctrl_slice().fill_empty();
3210
0
        }
3211
0
        self.items = 0;
3212
0
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3213
0
    }
3214
3215
    /// Erases the [`Bucket`]'s control byte at the given index so that it is no
3216
    /// longer flagged as full, decreases the `items` count of the table and, when
3217
    /// possible, increases `self.growth_left`.
3218
    ///
3219
    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3220
    /// does not make any changes to the `data` parts of the table. The caller of this
3221
    /// function must take care to properly drop the `data`, otherwise calling this
3222
    /// function may result in a memory leak.
3223
    ///
3224
    /// # Safety
3225
    ///
3226
    /// You must observe the following safety rules when calling this function:
3227
    ///
3228
    /// * The [`RawTableInner`] has already been allocated;
3229
    ///
3230
    /// * The control byte at the given `index` must be full;
3231
    ///
3232
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3233
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3234
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3235
    ///
3236
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3237
    ///
3238
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3239
    /// functions is likely to result in [`undefined behavior`] due to subtraction overflow
3240
    /// (`self.items -= 1` overflows when `self.items == 0`).
3241
    ///
3242
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
3243
    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
3244
    ///
3245
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3246
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3247
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3248
    #[inline]
3249
478k
    unsafe fn erase(&mut self, index: usize) {
3250
478k
        debug_assert!(self.is_bucket_full(index));
3251
3252
        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3253
        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3254
478k
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3255
        // SAFETY:
3256
        // - The caller must uphold the safety contract for `erase` method;
3257
        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3258
478k
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3259
478k
        let empty_after = Group::load(self.ctrl(index)).match_empty();
3260
3261
        // Inserting and searching in the map is performed by two key functions:
3262
        //
3263
        // - The `find_insert_index` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED`
3264
        //   slot in a group to be able to insert. If it doesn't find a `Tag::EMPTY` or `Tag::DELETED`
3265
        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3266
        //   and so on until it has gone through all the groups in the control bytes.
3267
        //
3268
        // - The `find_inner` function that looks for the index of the desired element by looking
3269
        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3270
        //   there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_index`
3271
        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3272
        //   jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY`
3273
        //   byte, then it jumps further, and so on. The search stops only if the `find_inner` function
3274
        //   finds the desired element or hits a `Tag::EMPTY` slot/byte.
3275
        //
3276
        // Accordingly, this leads to two consequences:
3277
        //
3278
        // - The map must have `Tag::EMPTY` slots (bytes);
3279
        //
3280
        // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner`
3281
        //   function may stumble upon a `Tag::EMPTY` byte before finding the desired element and stop
3282
        //   searching.
3283
        //
3284
        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3285
        // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes
3286
        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3287
        // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3288
        // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3289
        // upon a `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well.
3290
        //
3291
        // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
3292
        // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
3293
        // cannot have `Tag::DELETED` bytes.
3294
        //
3295
        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3296
        // `trailing_zeros` refers to the bytes at the beginning of a group.
3297
478k
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3298
46.3k
            Tag::DELETED
3299
        } else {
3300
432k
            self.growth_left += 1;
3301
432k
            Tag::EMPTY
3302
        };
3303
        // SAFETY: the caller must uphold the safety contract for `erase` method.
3304
478k
        self.set_ctrl(index, ctrl);
3305
478k
        self.items -= 1;
3306
478k
    }
<hashbrown::raw::RawTableInner>::erase
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::erase
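To make the tombstone decision above concrete, here is a self-contained sketch of the same rule with plain bytes standing in for hashbrown's `Tag` and `Group` types (`WIDTH`, `EMPTY` and `DELETED` are illustrative stand-ins, not the crate's real constants):

const WIDTH: usize = 8; // stand-in for Group::WIDTH
const EMPTY: u8 = 0xFF; // stand-in for Tag::EMPTY
const DELETED: u8 = 0x80; // stand-in for Tag::DELETED

fn erase_ctrl(ctrl: &mut [u8], index: usize) {
    let mask = ctrl.len() - 1; // the table size is a power of two
    let index_before = index.wrapping_sub(WIDTH) & mask;
    // Non-empty run at the end of the group before the erased slot
    // (the `leading_zeros` of the empty-match in the real code).
    let run_before = (0..WIDTH)
        .rev()
        .take_while(|&i| ctrl[(index_before + i) & mask] != EMPTY)
        .count();
    // Non-empty run at the start of the group beginning at the erased slot
    // (the `trailing_zeros` of the empty-match in the real code).
    let run_after = (0..WIDTH)
        .take_while(|&i| ctrl[(index + i) & mask] != EMPTY)
        .count();
    // If a whole group of non-empty bytes straddles the slot, a probe could
    // not have stopped here, so a tombstone is required; otherwise the slot
    // can safely become EMPTY again.
    ctrl[index] = if run_before + run_after >= WIDTH { DELETED } else { EMPTY };
}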
3307
}
3308
3309
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3310
0
    fn clone(&self) -> Self {
3311
0
        if self.table.is_empty_singleton() {
3312
0
            Self::new_in(self.alloc.clone())
3313
        } else {
3314
            unsafe {
3315
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3316
                //
3317
                // SAFETY: This is safe as we are taking the size of an already allocated table
3318
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
3319
                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3320
0
                let mut new_table = match Self::new_uninitialized(
3321
0
                    self.alloc.clone(),
3322
0
                    self.table.buckets(),
3323
0
                    Fallibility::Infallible,
3324
0
                ) {
3325
0
                    Ok(table) => table,
3326
0
                    Err(_) => hint::unreachable_unchecked(),
3327
                };
3328
3329
                // Cloning elements may fail (the clone function may panic). But we don't
3330
                // need to worry about uninitialized control bits, since:
3331
                // 1. The number of items (elements) in the table is zero, which means that
3332
                //    the control bits will not be read by the `Drop` function.
3333
                // 2. The `clone_from_spec` method will first copy all control bits from
3334
                //    `self` (thus initializing them). But this will not affect the `Drop`
3335
                //    function, since the `clone_from_spec` function sets `items` only after
3336
                //    successfully cloning all elements.
3337
0
                new_table.clone_from_spec(self);
3338
0
                new_table
3339
            }
3340
        }
3341
0
    }
3342
3343
0
    fn clone_from(&mut self, source: &Self) {
3344
0
        if source.table.is_empty_singleton() {
3345
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3346
0
            unsafe {
3347
0
                // SAFETY:
3348
0
                // 1. We call the function only once;
3349
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3350
0
                //    and [`TableLayout`] that were used to allocate this table.
3351
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
3352
0
                //    because we have replaced the inner table with a new one.
3353
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3354
0
            }
3355
        } else {
3356
            unsafe {
3357
                // Make sure that if any panic occurs, we clear the table and
3358
                // leave it in an empty state.
3359
0
                let mut self_ = guard(self, |self_| {
3360
0
                    self_.clear_no_drop();
3361
0
                });
3362
3363
                // First, drop all our elements without clearing the control
3364
                // bytes. If this panics then the scope guard will clear the
3365
                // table, leaking any elements that were not dropped yet.
3366
                //
3367
                // This leak is unavoidable: we can't try dropping more elements
3368
                // since this could lead to another panic and abort the process.
3369
                //
3370
                // SAFETY: If something goes wrong we clear our table right after
3371
                // dropping the elements, so there is no double drop, since `items`
3372
                // will be equal to zero.
3373
0
                self_.table.drop_elements::<T>();
3374
3375
                // If necessary, resize our table to match the source.
3376
0
                if self_.buckets() != source.buckets() {
3377
0
                    let new_inner = match RawTableInner::new_uninitialized(
3378
0
                        &self_.alloc,
3379
0
                        Self::TABLE_LAYOUT,
3380
0
                        source.buckets(),
3381
0
                        Fallibility::Infallible,
3382
0
                    ) {
3383
0
                        Ok(table) => table,
3384
0
                        Err(_) => hint::unreachable_unchecked(),
3385
                    };
3386
                    // Replace the old inner with the new uninitialized one. It's ok, since if something
3387
                    // goes wrong the `ScopeGuard` will initialize all control bytes and leave an empty table.
3388
0
                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3389
0
                    if !old_inner.is_empty_singleton() {
3390
0
                        // SAFETY:
3391
0
                        // 1. We have checked that our table is allocated.
3392
0
                        // 2. We know for sure that `alloc` and `table_layout` matches
3393
0
                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3394
0
                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3395
0
                    }
3396
0
                }
3397
3398
                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3399
                // inside the `clone_from_impl` function will take care of that, dropping all
3400
                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3401
0
                self_.clone_from_spec(source);
3402
3403
                // Disarm the scope guard if cloning was successful.
3404
0
                ScopeGuard::into_inner(self_);
3405
            }
3406
        }
3407
0
    }
3408
}
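A hedged illustration of the panic-safety contract above, assuming the public `HashMap::clone_from` delegates to this code path: if cloning an element panics, the destination is left cleared, so already-cloned elements may leak but nothing is double-dropped.

use hashbrown::HashMap;

struct Bomb(u32);

impl Clone for Bomb {
    fn clone(&self) -> Self {
        if self.0 == 2 {
            panic!("clone failed");
        }
        Bomb(self.0)
    }
}

fn main() {
    let mut dst: HashMap<u32, Bomb> = HashMap::new();
    dst.insert(9, Bomb(9));

    let mut src = HashMap::new();
    for i in 0..4 {
        src.insert(i, Bomb(i));
    }

    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        dst.clone_from(&src); // panics when Bomb(2) is cloned
    }));
    assert!(result.is_err());
    assert!(dst.is_empty()); // the scope guard cleared the table
}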
3409
3410
/// Specialization of `clone_from` for `Copy` types
3411
trait RawTableClone {
3412
    unsafe fn clone_from_spec(&mut self, source: &Self);
3413
}
3414
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3415
    default_fn! {
3416
        #[cfg_attr(feature = "inline-more", inline)]
3417
0
        unsafe fn clone_from_spec(&mut self, source: &Self) {
3418
0
            self.clone_from_impl(source);
3419
0
        }
3420
    }
3421
}
3422
#[cfg(feature = "nightly")]
3423
impl<T: core::clone::TrivialClone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3424
    #[cfg_attr(feature = "inline-more", inline)]
3425
    unsafe fn clone_from_spec(&mut self, source: &Self) {
3426
        source
3427
            .table
3428
            .ctrl(0)
3429
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3430
        source
3431
            .data_start()
3432
            .as_ptr()
3433
            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3434
3435
        self.table.items = source.table.items;
3436
        self.table.growth_left = source.table.growth_left;
3437
    }
3438
}
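The specialization above amounts to bulk byte copies; a minimal sketch of the same idea for an arbitrary `Copy` payload (`copy_buckets` is illustrative, not hashbrown's API):

use core::ptr;

// For trivially-cloneable elements, cloning the data array degenerates into
// a single non-overlapping memory copy, just like the control bytes above.
unsafe fn copy_buckets<T: Copy>(src: *const T, dst: *mut T, buckets: usize) {
    ptr::copy_nonoverlapping(src, dst, buckets);
}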
3439
3440
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3441
    /// Common code for `clone` and `clone_from`. Assumes:
3442
    /// - `self.buckets() == source.buckets()`.
3443
    /// - Any existing elements have been dropped.
3444
    /// - The control bytes are not initialized yet.
3445
    #[cfg_attr(feature = "inline-more", inline)]
3446
0
    unsafe fn clone_from_impl(&mut self, source: &Self) {
3447
        // Copy the control bytes unchanged. We do this in a single pass over the whole control-byte array.
3448
0
        source
3449
0
            .table
3450
0
            .ctrl(0)
3451
0
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3452
3453
        // The cloning of elements may panic, in which case we need
3454
        // to make sure we drop only the elements that have been
3455
        // cloned so far.
3456
0
        let mut guard = guard((0, &mut *self), |(index, self_)| {
3457
0
            if T::NEEDS_DROP {
3458
0
                for i in 0..*index {
3459
0
                    if self_.is_bucket_full(i) {
3460
0
                        self_.bucket(i).drop();
3461
0
                    }
3462
                }
3463
0
            }
3464
0
        });
3465
3466
0
        for from in source.iter() {
3467
0
            let index = source.bucket_index(&from);
3468
0
            let to = guard.1.bucket(index);
3469
0
            to.write(from.as_ref().clone());
3470
0
3471
0
            // Update the index in case we need to unwind.
3472
0
            guard.0 = index + 1;
3473
0
        }
3474
3475
        // Successfully cloned all items, no need to clean up.
3476
0
        mem::forget(guard);
3477
3478
0
        self.table.items = source.table.items;
3479
0
        self.table.growth_left = source.table.growth_left;
3480
0
    }
3481
}
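The `(index, self_)` guard above is an instance of a general pattern: record how far cloning got, clean up only that prefix if a panic unwinds, and disarm the guard on success. A free-standing sketch with illustrative types (hashbrown's real version writes into uninitialized buckets and tracks an index instead):

fn clone_all<T: Clone>(src: &[T], dst: &mut Vec<T>) {
    struct Guard<'a, T> {
        dst: &'a mut Vec<T>,
    }
    impl<T> Drop for Guard<'_, T> {
        fn drop(&mut self) {
            // Runs only on unwind: drop the already-cloned prefix so the
            // destination is left in a consistent (empty) state.
            self.dst.clear();
        }
    }

    let guard = Guard { dst };
    for item in src {
        guard.dst.push(item.clone()); // `clone` may panic here
    }
    // Success: disarm the guard so the cloned elements are kept.
    core::mem::forget(guard);
}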
3482
3483
impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3484
    #[inline]
3485
0
    fn default() -> Self {
3486
0
        Self::new_in(Default::default())
3487
0
    }
3488
}
3489
3490
#[cfg(feature = "nightly")]
3491
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3492
    #[cfg_attr(feature = "inline-more", inline)]
3493
    fn drop(&mut self) {
3494
        unsafe {
3495
            // SAFETY:
3496
            // 1. We call the function only once;
3497
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3498
            //    and [`TableLayout`] that were used to allocate this table.
3499
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3500
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3501
            //    so there won't be any table left in an inconsistent state.
3502
            self.table
3503
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3504
        }
3505
    }
3506
}
3507
#[cfg(not(feature = "nightly"))]
3508
impl<T, A: Allocator> Drop for RawTable<T, A> {
3509
    #[cfg_attr(feature = "inline-more", inline)]
3510
12.7k
    fn drop(&mut self) {
3511
12.7k
        unsafe {
3512
12.7k
            // SAFETY:
3513
12.7k
            // 1. We call the function only once;
3514
12.7k
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3515
12.7k
            //    and [`TableLayout`] that were used to allocate this table.
3516
12.7k
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3517
12.7k
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3518
12.7k
            //    so there won't be any table left in an inconsistent state.
3519
12.7k
            self.table
3520
12.7k
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3521
12.7k
        }
3522
12.7k
    }
<hashbrown::raw::RawTable<usize> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as core::ops::drop::Drop>::drop
3523
}
3524
3525
impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3526
    type Item = T;
3527
    type IntoIter = RawIntoIter<T, A>;
3528
3529
    #[cfg_attr(feature = "inline-more", inline)]
3530
0
    fn into_iter(self) -> RawIntoIter<T, A> {
3531
        unsafe {
3532
0
            let iter = self.iter();
3533
0
            self.into_iter_from(iter)
3534
        }
3535
0
    }
3536
}
3537
3538
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3539
/// not track an item count.
3540
pub(crate) struct RawIterRange<T> {
3541
    // Mask of full buckets in the current group. Bits are cleared from this
3542
    // mask as each element is processed.
3543
    current_group: BitMaskIter,
3544
3545
    // Pointer to the buckets for the current group.
3546
    data: Bucket<T>,
3547
3548
    // Pointer to the next group of control bytes.
3549
    // Must be aligned to the group size.
3550
    next_ctrl: *const u8,
3551
3552
    // Pointer one past the last control byte of this range.
3553
    end: *const u8,
3554
}
3555
3556
impl<T> RawIterRange<T> {
3557
    /// Returns a `RawIterRange` covering a subset of a table.
3558
    ///
3559
    /// # Safety
3560
    ///
3561
    /// If any of the following conditions are violated, the result is
3562
    /// [`undefined behavior`]:
3563
    ///
3564
    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3565
    ///
3566
    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
3567
    ///
3568
    /// * `ctrl` must point to the array of properly initialized control bytes;
3569
    ///
3570
    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3571
    ///
3572
    /// * the value of `len` must be less than or equal to the number of table buckets,
3573
    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3574
    ///   must be positive.
3575
    ///
3576
    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3577
    ///   byte past the end of the same [allocated table].
3578
    ///
3579
    /// * The `len` must be a power of two.
3580
    ///
3581
    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3582
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3583
    #[cfg_attr(feature = "inline-more", inline)]
3584
0
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3585
0
        debug_assert_ne!(len, 0);
3586
0
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3587
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3588
0
        let end = ctrl.add(len);
3589
3590
        // Load the first group and advance ctrl to point to the next group
3591
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3592
0
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3593
0
        let next_ctrl = ctrl.add(Group::WIDTH);
3594
3595
0
        Self {
3596
0
            current_group: current_group.into_iter(),
3597
0
            data,
3598
0
            next_ctrl,
3599
0
            end,
3600
0
        }
3601
0
    }
3602
3603
    /// Splits a `RawIterRange` into two halves.
3604
    ///
3605
    /// Returns `None` if the remaining range is smaller than or equal to the
3606
    /// group width.
3607
    #[cfg_attr(feature = "inline-more", inline)]
3608
    #[cfg(feature = "rayon")]
3609
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3610
        unsafe {
3611
            if self.end <= self.next_ctrl {
3612
                // Nothing to split if the group that we are currently processing
3613
                // is the last one.
3614
                (self, None)
3615
            } else {
3616
                // len is the remaining number of elements after the group that
3617
                // we are currently processing. It must be a multiple of the
3618
                // group size (small tables are caught by the check above).
3619
                let len = offset_from(self.end, self.next_ctrl);
3620
                debug_assert_eq!(len % Group::WIDTH, 0);
3621
3622
                // Split the remaining elements into two halves, but round the
3623
                // midpoint down in case there is an odd number of groups
3624
                // remaining. This ensures that:
3625
                // - The tail is at least 1 group long.
3626
                // - The split is roughly even considering we still have the
3627
                //   current group to process.
3628
                let mid = (len / 2) & !(Group::WIDTH - 1);
3629
3630
                let tail = Self::new(
3631
                    self.next_ctrl.add(mid),
3632
                    self.data.next_n(Group::WIDTH).next_n(mid),
3633
                    len - mid,
3634
                );
3635
                debug_assert_eq!(
3636
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3637
                    tail.data.ptr
3638
                );
3639
                debug_assert_eq!(self.end, tail.end);
3640
                self.end = self.next_ctrl.add(mid);
3641
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3642
                (self, Some(tail))
3643
            }
3644
        }
3645
    }
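A quick check of the midpoint rounding used in `split`: masking with `!(Group::WIDTH - 1)` keeps the split on a group boundary (shown here with an illustrative width of 16):

const WIDTH: usize = 16; // stand-in for Group::WIDTH

fn mid(len: usize) -> usize {
    (len / 2) & !(WIDTH - 1)
}

fn main() {
    assert_eq!(mid(32), 16); // two groups remain: split exactly in half
    assert_eq!(mid(48), 16); // three groups remain: round the midpoint down
    assert_eq!(mid(16), 0);  // one group remains: it all goes to the tail
}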
3646
3647
    /// # Safety
3648
    /// If `DO_CHECK_PTR_RANGE` is false, the caller must ensure that we never try to iterate
3649
    /// after yielding all elements.
3650
    #[cfg_attr(feature = "inline-more", inline)]
3651
0
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3652
        loop {
3653
0
            if let Some(index) = self.current_group.next() {
3654
0
                return Some(self.data.next_n(index));
3655
0
            }
3656
3657
0
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3658
0
                return None;
3659
0
            }
3660
3661
            // We might read past self.end up to the next group boundary,
3662
            // but this is fine because it only occurs on tables smaller
3663
            // than the group size where the trailing control bytes are all
3664
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3665
            // to the group size (since tables are power-of-two sized).
3666
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3667
0
                .match_full()
3668
0
                .into_iter();
3669
0
            self.data = self.data.next_n(Group::WIDTH);
3670
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3671
        }
3672
0
    }
3673
3674
    /// Folds every element into an accumulator by applying an operation,
3675
    /// returning the final result.
3676
    ///
3677
    /// `fold_impl()` takes three arguments: the number of items remaining in
3678
    /// the iterator, an initial value, and a closure with two arguments: an
3679
    /// 'accumulator', and an element. The closure returns the value that the
3680
    /// accumulator should have for the next iteration.
3681
    ///
3682
    /// The initial value is the value the accumulator will have on the first call.
3683
    ///
3684
    /// After applying this closure to every element of the iterator, `fold_impl()`
3685
    /// returns the accumulator.
3686
    ///
3687
    /// # Safety
3688
    ///
3689
    /// If any of the following conditions are violated, the result is
3690
    /// [`Undefined Behavior`]:
3691
    ///
3692
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3693
    ///   i.e. table outlives the `RawIterRange`;
3694
    ///
3695
    /// * The provided `n` value must match the actual number of items
3696
    ///   in the table.
3697
    ///
3698
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3699
    #[allow(clippy::while_let_on_iterator)]
3700
    #[cfg_attr(feature = "inline-more", inline)]
3701
0
    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3702
0
    where
3703
0
        F: FnMut(B, Bucket<T>) -> B,
3704
    {
3705
        loop {
3706
0
            while let Some(index) = self.current_group.next() {
3707
                // The returned `index` will always be in the range `0..Group::WIDTH`,
3708
                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3709
0
                debug_assert!(n != 0);
3710
0
                let bucket = self.data.next_n(index);
3711
0
                acc = f(acc, bucket);
3712
0
                n -= 1;
3713
            }
3714
3715
0
            if n == 0 {
3716
0
                return acc;
3717
0
            }
3718
3719
            // SAFETY: The caller of this function ensures that:
3720
            //
3721
            // 1. The provided `n` value matches the actual number of items in the table;
3722
            // 2. The table is alive and has not moved.
3723
            //
3724
            // Taking the above into account, we always stay within the bounds, because:
3725
            //
3726
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3727
            //    we will never end up in the given branch, since we should have already
3728
            //    yielded all the elements of the table.
3729
            //
3730
            // 2. For tables larger than the group width. The number of buckets is a
3731
            //    power of two (2 ^ n), Group::WIDTH is also a power of two (2 ^ k). Since
3732
            //    `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3733
            //    start of the array of control bytes, and never try to iterate after
3734
            //    getting all the elements, the last `self.current_group` will read bytes
3735
            //    from the `self.buckets() - Group::WIDTH` index.  We know also that
3736
            //    `self.current_group.next()` will always return indices within the range
3737
            //    `0..Group::WIDTH`.
3738
            //
3739
            //    Knowing all of the above and taking into account that we are synchronizing
3740
            //    the `self.data` index with the index we used to read the `self.current_group`,
3741
            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3742
            //    an index number less than `self.buckets()`.
3743
            //
3744
            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3745
            //    actually be read, since we should have already yielded all the elements of
3746
            //    the table.
3747
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3748
0
                .match_full()
3749
0
                .into_iter();
3750
0
            self.data = self.data.next_n(Group::WIDTH);
3751
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3752
        }
3753
0
    }
3754
}
3755
3756
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3757
// in the actual iterator implementations determine the real Send/Sync bounds.
3758
unsafe impl<T> Send for RawIterRange<T> {}
3759
unsafe impl<T> Sync for RawIterRange<T> {}
3760
3761
impl<T> Clone for RawIterRange<T> {
3762
    #[cfg_attr(feature = "inline-more", inline)]
3763
0
    fn clone(&self) -> Self {
3764
0
        Self {
3765
0
            data: self.data.clone(),
3766
0
            next_ctrl: self.next_ctrl,
3767
0
            current_group: self.current_group.clone(),
3768
0
            end: self.end,
3769
0
        }
3770
0
    }
3771
}
3772
3773
impl<T> Iterator for RawIterRange<T> {
3774
    type Item = Bucket<T>;
3775
3776
    #[cfg_attr(feature = "inline-more", inline)]
3777
0
    fn next(&mut self) -> Option<Bucket<T>> {
3778
        unsafe {
3779
            // SAFETY: We set checker flag to true.
3780
0
            self.next_impl::<true>()
3781
        }
3782
0
    }
3783
3784
    #[inline]
3785
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3786
        // We don't have an item count, so just guess based on the range size.
3787
0
        let remaining_buckets = if self.end > self.next_ctrl {
3788
0
            unsafe { offset_from(self.end, self.next_ctrl) }
3789
        } else {
3790
0
            0
3791
        };
3792
3793
        // Add a group width to include the group we are currently processing.
3794
0
        (0, Some(Group::WIDTH + remaining_buckets))
3795
0
    }
3796
}
3797
3798
impl<T> FusedIterator for RawIterRange<T> {}
3799
3800
/// Iterator which returns a raw pointer to every full bucket in the table.
3801
///
3802
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3803
/// must observe several rules when using it:
3804
/// - You must not free the hash table while iterating (including via growing/shrinking).
3805
/// - It is fine to erase a bucket that has been yielded by the iterator.
3806
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3807
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3808
/// - It is unspecified whether an element inserted after the iterator was
3809
///   created will be yielded by that iterator (unless `reflect_insert` is called).
3810
/// - The order in which the iterator yields buckets is unspecified and may
3811
///   change in the future.
3812
pub struct RawIter<T> {
3813
    pub(crate) iter: RawIterRange<T>,
3814
    items: usize,
3815
}
3816
3817
impl<T> RawIter<T> {
3818
0
    unsafe fn drop_elements(&mut self) {
3819
0
        if T::NEEDS_DROP && self.items != 0 {
3820
0
            for item in self {
3821
0
                item.drop();
3822
0
            }
3823
0
        }
3824
0
    }
3825
}
3826
3827
impl<T> Clone for RawIter<T> {
3828
    #[cfg_attr(feature = "inline-more", inline)]
3829
0
    fn clone(&self) -> Self {
3830
0
        Self {
3831
0
            iter: self.iter.clone(),
3832
0
            items: self.items,
3833
0
        }
3834
0
    }
3835
}
3836
impl<T> Default for RawIter<T> {
3837
    #[cfg_attr(feature = "inline-more", inline)]
3838
0
    fn default() -> Self {
3839
        // SAFETY: Because the table is static, it always outlives the iter.
3840
0
        unsafe { RawTableInner::NEW.iter() }
3841
0
    }
3842
}
3843
3844
impl<T> Iterator for RawIter<T> {
3845
    type Item = Bucket<T>;
3846
3847
    #[cfg_attr(feature = "inline-more", inline)]
3848
0
    fn next(&mut self) -> Option<Bucket<T>> {
3849
        // The inner iterator iterates over buckets,
3850
        // so it can do unnecessary work if we have already yielded all items.
3851
0
        if self.items == 0 {
3852
0
            return None;
3853
0
        }
3854
3855
0
        let nxt = unsafe {
3856
            // SAFETY: We check the number of items to yield using the `items` field.
3857
0
            self.iter.next_impl::<false>()
3858
        };
3859
3860
0
        debug_assert!(nxt.is_some());
3861
0
        self.items -= 1;
3862
3863
0
        nxt
3864
0
    }
3865
3866
    #[inline]
3867
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3868
0
        (self.items, Some(self.items))
3869
0
    }
3870
3871
    #[inline]
3872
0
    fn fold<B, F>(self, init: B, f: F) -> B
3873
0
    where
3874
0
        Self: Sized,
3875
0
        F: FnMut(B, Self::Item) -> B,
3876
    {
3877
0
        unsafe { self.iter.fold_impl(self.items, init, f) }
3878
0
    }
3879
}
3880
3881
impl<T> ExactSizeIterator for RawIter<T> {}
3882
impl<T> FusedIterator for RawIter<T> {}
3883
3884
/// Iterator which returns an index of every full bucket in the table.
3885
///
3886
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3887
/// must observe several rules when using it:
3888
/// - You must not free the hash table while iterating (including via growing/shrinking).
3889
/// - It is fine to erase a bucket that has been yielded by the iterator.
3890
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3891
///   result in the iterator yielding the index of that bucket.
3892
/// - It is unspecified whether an element inserted after the iterator was
3893
///   created will be yielded by that iterator.
3894
/// - The order in which the iterator yields indices of the buckets is unspecified
3895
///   and may change in the future.
3896
#[derive(Clone)]
3897
pub(crate) struct FullBucketsIndices {
3898
    // Mask of full buckets in the current group. Bits are cleared from this
3899
    // mask as each element is processed.
3900
    current_group: BitMaskIter,
3901
3902
    // Index of the first control byte of the current group (relative
3903
    // to the start of the control bytes).
3904
    group_first_index: usize,
3905
3906
    // Pointer to the current group of control bytes.
3907
    // Must be aligned to the group size (Group::WIDTH).
3908
    ctrl: NonNull<u8>,
3909
3910
    // Number of elements in the table.
3911
    items: usize,
3912
}
3913
3914
impl Default for FullBucketsIndices {
3915
    #[cfg_attr(feature = "inline-more", inline)]
3916
0
    fn default() -> Self {
3917
        // SAFETY: Because the table is static, it always outlives the iter.
3918
0
        unsafe { RawTableInner::NEW.full_buckets_indices() }
3919
0
    }
3920
}
3921
3922
impl FullBucketsIndices {
3923
    /// Advances the iterator and returns the next value.
3924
    ///
3925
    /// # Safety
3926
    ///
3927
    /// If any of the following conditions are violated, the result is
3928
    /// [`Undefined Behavior`]:
3929
    ///
3930
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3931
    ///   i.e. table outlives the `FullBucketsIndices`;
3932
    ///
3933
    /// * It never tries to iterate after getting all elements.
3934
    ///
3935
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3936
    #[inline(always)]
3937
725k
    unsafe fn next_impl(&mut self) -> Option<usize> {
3938
        loop {
3939
762k
            if let Some(index) = self.current_group.next() {
3940
                // The returned `self.group_first_index + index` will always
3941
                // be in the range `0..self.buckets()`. See explanation below.
3942
725k
                return Some(self.group_first_index + index);
3943
36.0k
            }
3944
3945
            // SAFETY: The caller of this function ensures that:
3946
            //
3947
            // 1. It never tries to iterate after getting all the elements;
3948
            // 2. The table is alive and has not moved;
3949
            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3950
            //
3951
            // Taking the above into account, we always stay within the bounds, because:
3952
            //
3953
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3954
            //    we will never end up in the given branch, since we should have already
3955
            //    yielded all the elements of the table.
3956
            //
3957
            // 2. For tables larger than the group width. The number of buckets is a
3958
            //    power of two (2 ^ n), Group::WIDTH is also a power of two (2 ^ k). Since
3959
            //    `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start from
3960
            //    the start of the array of control bytes, and never try to iterate after
3961
            //    getting all the elements, the last `self.ctrl` will be equal to
3962
            //    the `self.buckets() - Group::WIDTH` index, so `self.current_group.next()`
3963
            //    will always contain indices within the range `0..Group::WIDTH`,
3964
            //    and subsequent `self.group_first_index + index` will always return a
3965
            //    number less than `self.buckets()`.
3966
36.0k
            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3967
3968
            // SAFETY: See explanation above.
3969
36.0k
            self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast())
3970
36.0k
                .match_full()
3971
36.0k
                .into_iter();
3972
36.0k
            self.group_first_index += Group::WIDTH;
3973
        }
3974
725k
    }
3975
}
3976
3977
impl Iterator for FullBucketsIndices {
3978
    type Item = usize;
3979
3980
    /// Advances the iterator and returns the next value. It is up to
3981
    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3982
    /// because we cannot make the `next` method unsafe.
3983
    #[inline(always)]
3984
753k
    fn next(&mut self) -> Option<usize> {
3985
        // Return if we already yielded all items.
3986
753k
        if self.items == 0 {
3987
27.0k
            return None;
3988
725k
        }
3989
3990
725k
        let nxt = unsafe {
3991
            // SAFETY:
3992
            // 1. We check the number of items to yield using the `items` field.
3993
            // 2. The caller ensures that the table is alive and has not moved.
3994
725k
            self.next_impl()
3995
        };
3996
3997
725k
        debug_assert!(nxt.is_some());
3998
725k
        self.items -= 1;
3999
4000
725k
        nxt
4001
753k
    }
4002
4003
    #[inline(always)]
4004
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4005
0
        (self.items, Some(self.items))
4006
0
    }
4007
}
4008
4009
impl ExactSizeIterator for FullBucketsIndices {}
4010
impl FusedIterator for FullBucketsIndices {}
4011
4012
/// Iterator which consumes a table and returns elements.
4013
pub struct RawIntoIter<T, A: Allocator = Global> {
4014
    iter: RawIter<T>,
4015
    allocation: Option<(NonNull<u8>, Layout, A)>,
4016
    marker: PhantomData<T>,
4017
}
4018
4019
impl<T, A: Allocator> RawIntoIter<T, A> {
4020
    #[cfg_attr(feature = "inline-more", inline)]
4021
0
    pub fn iter(&self) -> RawIter<T> {
4022
0
        self.iter.clone()
4023
0
    }
4024
}
4025
4026
unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
4027
where
4028
    T: Send,
4029
    A: Send,
4030
{
4031
}
4032
unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
4033
where
4034
    T: Sync,
4035
    A: Sync,
4036
{
4037
}
4038
4039
#[cfg(feature = "nightly")]
4040
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
4041
    #[cfg_attr(feature = "inline-more", inline)]
4042
    fn drop(&mut self) {
4043
        unsafe {
4044
            // Drop all remaining elements
4045
            self.iter.drop_elements();
4046
4047
            // Free the table
4048
            if let Some((ptr, layout, ref alloc)) = self.allocation {
4049
                alloc.deallocate(ptr, layout);
4050
            }
4051
        }
4052
    }
4053
}
4054
#[cfg(not(feature = "nightly"))]
4055
impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
4056
    #[cfg_attr(feature = "inline-more", inline)]
4057
0
    fn drop(&mut self) {
4058
        unsafe {
4059
            // Drop all remaining elements
4060
0
            self.iter.drop_elements();
4061
4062
            // Free the table
4063
0
            if let Some((ptr, layout, ref alloc)) = self.allocation {
4064
0
                alloc.deallocate(ptr, layout);
4065
0
            }
4066
        }
4067
0
    }
4068
}
4069
4070
impl<T, A: Allocator> Default for RawIntoIter<T, A> {
4071
0
    fn default() -> Self {
4072
0
        Self {
4073
0
            iter: Default::default(),
4074
0
            allocation: None,
4075
0
            marker: PhantomData,
4076
0
        }
4077
0
    }
4078
}
4079
impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
4080
    type Item = T;
4081
4082
    #[cfg_attr(feature = "inline-more", inline)]
4083
0
    fn next(&mut self) -> Option<T> {
4084
0
        unsafe { Some(self.iter.next()?.read()) }
4085
0
    }
4086
4087
    #[inline]
4088
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4089
0
        self.iter.size_hint()
4090
0
    }
4091
}
4092
4093
impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
4094
impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
4095
4096
/// Iterator which consumes elements without freeing the table storage.
4097
pub struct RawDrain<'a, T, A: Allocator = Global> {
4098
    iter: RawIter<T>,
4099
4100
    // The table is moved into the iterator for the duration of the drain. This
4101
    // ensures that an empty table is left if the drain iterator is leaked
4102
    // without dropping.
4103
    table: RawTableInner,
4104
    orig_table: NonNull<RawTableInner>,
4105
4106
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
4107
    // covariant over T.
4108
    marker: PhantomData<&'a RawTable<T, A>>,
4109
}
4110
4111
impl<T, A: Allocator> RawDrain<'_, T, A> {
4112
    #[cfg_attr(feature = "inline-more", inline)]
4113
0
    pub fn iter(&self) -> RawIter<T> {
4114
0
        self.iter.clone()
4115
0
    }
4116
}
4117
4118
unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
4119
where
4120
    T: Send,
4121
    A: Send,
4122
{
4123
}
4124
unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
4125
where
4126
    T: Sync,
4127
    A: Sync,
4128
{
4129
}
4130
4131
impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
4132
    #[cfg_attr(feature = "inline-more", inline)]
4133
0
    fn drop(&mut self) {
4134
0
        unsafe {
4135
0
            // Drop all remaining elements. Note that this may panic.
4136
0
            self.iter.drop_elements();
4137
0
4138
0
            // Reset the contents of the table now that all elements have been
4139
0
            // dropped.
4140
0
            self.table.clear_no_drop();
4141
0
4142
0
            // Move the now empty table back to its original location.
4143
0
            self.orig_table
4144
0
                .as_ptr()
4145
0
                .copy_from_nonoverlapping(&self.table, 1);
4146
0
        }
4147
0
    }
4148
}
4149
4150
impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
4151
    type Item = T;
4152
4153
    #[cfg_attr(feature = "inline-more", inline)]
4154
0
    fn next(&mut self) -> Option<T> {
4155
        unsafe {
4156
0
            let item = self.iter.next()?;
4157
0
            Some(item.read())
4158
        }
4159
0
    }
4160
4161
    #[inline]
4162
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4163
0
        self.iter.size_hint()
4164
0
    }
4165
}
4166
4167
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4168
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
4169
4170
/// Iterator over occupied buckets that could match a given hash.
4171
///
4172
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4173
/// items that have a hash value different than the one provided. You should
4174
/// always validate the returned values before using them.
4175
///
4176
/// For maximum flexibility this iterator is not bound by a lifetime, but you
4177
/// must observe several rules when using it:
4178
/// - You must not free the hash table while iterating (including via growing/shrinking).
4179
/// - It is fine to erase a bucket that has been yielded by the iterator.
4180
/// - Erasing a bucket that has not yet been yielded by the iterator may still
4181
///   result in the iterator yielding that bucket.
4182
/// - It is unspecified whether an element inserted after the iterator was
4183
///   created will be yielded by that iterator.
4184
/// - The order in which the iterator yields buckets is unspecified and may
4185
///   change in the future.
4186
pub struct RawIterHash<T> {
4187
    inner: RawIterHashIndices,
4188
    _marker: PhantomData<T>,
4189
}
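To see why callers must re-validate matches, a small sketch of the tag collision: assuming (as on 64-bit targets) that `Tag::full` keeps the top 7 bits of the hash, distinct hashes can share a tag, so this iterator can yield false-positive candidates.

fn tag(hash: u64) -> u8 {
    (hash >> 57) as u8 // top 7 bits; an assumed stand-in for `Tag::full`
}

fn main() {
    let h1 = 0xAB00_0000_0000_0000_u64;
    let h2 = 0xAB00_0000_0000_0001_u64;
    assert_ne!(h1, h2);
    assert_eq!(tag(h1), tag(h2)); // same tag: both would probe as candidates
}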
4190
4191
#[derive(Clone)]
4192
pub(crate) struct RawIterHashIndices {
4193
    // See `RawTableInner`'s corresponding fields for details.
4194
    // We can't store a `*const RawTableInner` as it would get
4195
    // invalidated by the user calling `&mut` methods on `RawTable`.
4196
    bucket_mask: usize,
4197
    ctrl: NonNull<u8>,
4198
4199
    // The top 7 bits of the hash.
4200
    tag_hash: Tag,
4201
4202
    // The sequence of groups to probe in the search.
4203
    probe_seq: ProbeSeq,
4204
4205
    group: Group,
4206
4207
    // The elements within the group with a matching tag-hash.
4208
    bitmask: BitMaskIter,
4209
}
4210
4211
impl<T> RawIterHash<T> {
4212
    #[cfg_attr(feature = "inline-more", inline)]
4213
0
    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4214
0
        RawIterHash {
4215
0
            inner: RawIterHashIndices::new(&table.table, hash),
4216
0
            _marker: PhantomData,
4217
0
        }
4218
0
    }
4219
}
4220
4221
impl<T> Clone for RawIterHash<T> {
4222
    #[cfg_attr(feature = "inline-more", inline)]
4223
0
    fn clone(&self) -> Self {
4224
0
        Self {
4225
0
            inner: self.inner.clone(),
4226
0
            _marker: PhantomData,
4227
0
        }
4228
0
    }
4229
}
4230
4231
impl<T> Default for RawIterHash<T> {
4232
    #[cfg_attr(feature = "inline-more", inline)]
4233
0
    fn default() -> Self {
4234
0
        Self {
4235
0
            inner: RawIterHashIndices::default(),
4236
0
            _marker: PhantomData,
4237
0
        }
4238
0
    }
4239
}
4240
4241
impl Default for RawIterHashIndices {
4242
    #[cfg_attr(feature = "inline-more", inline)]
4243
0
    fn default() -> Self {
4244
        // SAFETY: Because the table is static, it always outlives the iter.
4245
0
        unsafe { RawIterHashIndices::new(&RawTableInner::NEW, 0) }
4246
0
    }
4247
}
4248
4249
impl RawIterHashIndices {
4250
    #[cfg_attr(feature = "inline-more", inline)]
4251
0
    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4252
0
        let tag_hash = Tag::full(hash);
4253
0
        let probe_seq = table.probe_seq(hash);
4254
0
        let group = Group::load(table.ctrl(probe_seq.pos));
4255
0
        let bitmask = group.match_tag(tag_hash).into_iter();
4256
4257
0
        RawIterHashIndices {
4258
0
            bucket_mask: table.bucket_mask,
4259
0
            ctrl: table.ctrl,
4260
0
            tag_hash,
4261
0
            probe_seq,
4262
0
            group,
4263
0
            bitmask,
4264
0
        }
4265
0
    }
4266
}
4267
4268
impl<T> Iterator for RawIterHash<T> {
4269
    type Item = Bucket<T>;
4270
4271
0
    fn next(&mut self) -> Option<Bucket<T>> {
4272
        unsafe {
4273
0
            match self.inner.next() {
4274
0
                Some(index) => {
4275
                    // Can't use `RawTable::bucket` here as we don't have
4276
                    // an actual `RawTable` reference to use.
4277
0
                    debug_assert!(index <= self.inner.bucket_mask);
4278
0
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4279
0
                    Some(bucket)
4280
                }
4281
0
                None => None,
4282
            }
4283
        }
4284
0
    }
4285
}
4286
4287
impl Iterator for RawIterHashIndices {
4288
    type Item = usize;
4289
4290
0
    fn next(&mut self) -> Option<Self::Item> {
4291
        unsafe {
4292
            loop {
4293
0
                if let Some(bit) = self.bitmask.next() {
4294
0
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4295
0
                    return Some(index);
4296
0
                }
4297
0
                if likely(self.group.match_empty().any_bit_set()) {
4298
0
                    return None;
4299
0
                }
4300
0
                self.probe_seq.move_next(self.bucket_mask);
4301
4302
                // Can't use `RawTableInner::ctrl` here as we don't have
4303
                // an actual `RawTableInner` reference to use.
4304
0
                let index = self.probe_seq.pos;
4305
0
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4306
0
                let group_ctrl = self.ctrl.as_ptr().add(index).cast();
4307
4308
0
                self.group = Group::load(group_ctrl);
4309
0
                self.bitmask = self.group.match_tag(self.tag_hash).into_iter();
4310
            }
4311
        }
4312
0
    }
4313
}
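// ---- Illustrative sketch (editor's addition, not part of the measured source) ----
// The loop above yields every tag match in the current group, stops once a
// group contains an EMPTY byte (a matching key can never have been displaced
// past it), and otherwise advances by the triangular probe sequence. The same
// logic over a plain control-byte slice, with hypothetical constants for an
// SSE2-like layout; `ctrl.len()` must be `buckets + GROUP_WIDTH`, which the
// mirrored trailing control bytes guarantee:

fn probe_matches_sketch(ctrl: &[u8], bucket_mask: usize, h1: usize, tag: u8) -> ::alloc::vec::Vec<usize> {
    const GROUP_WIDTH: usize = 16; // assumption: SSE2 group width
    const EMPTY: u8 = 0xFF;
    let mut matches = ::alloc::vec::Vec::new();
    let mut pos = h1 & bucket_mask;
    let mut stride = 0;
    loop {
        let group = &ctrl[pos..pos + GROUP_WIDTH];
        for (bit, &byte) in group.iter().enumerate() {
            if byte == tag {
                // Candidate slot; the caller still has to compare keys.
                matches.push((pos + bit) & bucket_mask);
            }
        }
        if group.contains(&EMPTY) {
            return matches;
        }
        // Triangular probing: jump 1, then 2, then 3, ... groups, which
        // visits every group exactly once on power-of-two table sizes.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
}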
4314
4315
pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4316
    pub iter: RawIter<T>,
4317
    pub table: &'a mut RawTable<T, A>,
4318
}
4319
4320
impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4321
    #[cfg_attr(feature = "inline-more", inline)]
4322
0
    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4323
0
    where
4324
0
        F: FnMut(&mut T) -> bool,
4325
    {
4326
        unsafe {
4327
0
            for item in &mut self.iter {
4328
0
                if f(item.as_mut()) {
4329
0
                    return Some(self.table.remove(item).0);
4330
0
                }
4331
            }
4332
        }
4333
0
        None
4334
0
    }
4335
}
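// ---- Illustrative sketch (editor's addition, not part of the measured source) ----
// `RawExtractIf` resumes the same `RawIter` across calls, so every element is
// tested at most once and removals go through `RawTable::remove`. A minimal
// usage sketch, assuming it lives in this module where the crate-private
// types are visible (identity hashing for brevity):

fn raw_extract_if_sketch() {
    let hasher = |v: &u64| *v;
    let mut table: RawTable<u64> = RawTable::new();
    for i in 0..10 {
        table.insert(i, i, hasher);
    }
    let mut it = RawExtractIf {
        // SAFETY: `table` outlives the iterator, and the only mutation
        // while it is alive is removal through `it` itself.
        iter: unsafe { table.iter() },
        table: &mut table,
    };
    // Drain the even values; odd values stay in the table.
    while let Some(v) = it.next(|v| *v % 2 == 0) {
        assert_eq!(v % 2, 0);
    }
    drop(it);
    assert_eq!(table.len(), 5);
}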
4336
4337
#[cfg(test)]
4338
mod test_map {
4339
    use super::*;
4340
4341
    #[test]
4342
    fn test_prev_pow2() {
4343
        // Skip 0; `prev_pow2` is not defined for that input.
4344
        let mut pow2: usize = 1;
4345
        while (pow2 << 1) > 0 {
4346
            let next_pow2 = pow2 << 1;
4347
            assert_eq!(pow2, prev_pow2(pow2));
4348
            // Skip `next_pow2 == 2`: `pow2 + 1 == 2` is itself a power of 2,
4349
            // so `prev_pow2(2)` returns 2 rather than the previous power of 2.
4350
            if next_pow2 > 2 {
4351
                assert_eq!(pow2, prev_pow2(pow2 + 1));
4352
                assert_eq!(pow2, prev_pow2(next_pow2 - 1));
4353
            }
4354
            pow2 = next_pow2;
4355
        }
4356
    }
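// ---- Illustrative sketch (editor's addition, not part of the measured source) ----
// One plausible `prev_pow2` satisfying the property the test above checks,
// i.e. the largest power of two <= n for n > 0 (hypothetical; not necessarily
// the implementation this module uses):

fn prev_pow2_sketch(n: usize) -> usize {
    // The highest set bit of `n` is the answer: shift a 1 up to it.
    1usize << (usize::BITS - 1 - n.leading_zeros())
}
// prev_pow2_sketch(1) == 1, prev_pow2_sketch(5) == 4, prev_pow2_sketch(8) == 8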
4357
4358
    #[test]
4359
    fn test_minimum_capacity_for_small_types() {
4360
        #[track_caller]
4361
        fn test_t<T>() {
4362
            let raw_table: RawTable<T> = RawTable::with_capacity(1);
4363
            let actual_buckets = raw_table.buckets();
4364
            let min_buckets = Group::WIDTH / core::mem::size_of::<T>();
4365
            assert!(
4366
                actual_buckets >= min_buckets,
4367
                "expected at least {min_buckets} buckets, got {actual_buckets} buckets"
4368
            );
4369
        }
4370
4371
        test_t::<u8>();
4372
4373
        // This is only "small" for some platforms, like x86_64 with SSE2, but
4374
        // there's no harm in running it on other platforms.
4375
        test_t::<u16>();
4376
    }
4377
4378
    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4379
        unsafe {
4380
            table.table.rehash_in_place(
4381
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4382
                mem::size_of::<T>(),
4383
                if mem::needs_drop::<T>() {
4384
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
4385
                } else {
4386
                    None
4387
                },
4388
            );
4389
        }
4390
    }
4391
4392
    #[test]
4393
    fn rehash() {
4394
        let mut table = RawTable::new();
4395
        let hasher = |i: &u64| *i;
4396
        for i in 0..100 {
4397
            table.insert(i, i, hasher);
4398
        }
4399
4400
        for i in 0..100 {
4401
            unsafe {
4402
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4403
            }
4404
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4405
        }
4406
4407
        rehash_in_place(&mut table, hasher);
4408
4409
        for i in 0..100 {
4410
            unsafe {
4411
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4412
            }
4413
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4414
        }
4415
    }
4416
4417
    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4418
    /// AN UNINITIALIZED TABLE WHEN IT IS DROPPED
4419
    #[test]
4420
    fn test_drop_uninitialized() {
4421
        use ::alloc::vec::Vec;
4422
4423
        let table = unsafe {
4424
            // SAFETY: `buckets` is a power of two and we're not
4425
            // trying to actually use the returned RawTable.
4426
            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4427
                .unwrap()
4428
        };
4429
        drop(table);
4430
    }
4431
4432
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4433
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4434
    #[test]
4435
    fn test_drop_zero_items() {
4436
        use ::alloc::vec::Vec;
4437
        unsafe {
4438
            // SAFETY: `buckets` is a power of two and we're not
4439
            // trying to actually use the returned RawTable.
4440
            let mut table =
4441
                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4442
                    .unwrap();
4443
4444
            // WE SIMULATE A FULL TABLE BY MARKING EVERY USABLE CONTROL BYTE AS FULL.
4445
4446
            // SAFETY: We checked that the table is allocated, so it already has
4447
            // `self.bucket_mask + 1 + Group::WIDTH` control bytes (see `TableLayout::calculate_layout_for`),
4448
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4449
            table.table.ctrl_slice().fill_empty();
4450
4451
            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4452
            table.table.ctrl(0).write_bytes(0, table.capacity());
4453
4454
            // Fix up the trailing control bytes. See the comments in set_ctrl
4455
            // for the handling of tables smaller than the group width.
4456
            if table.buckets() < Group::WIDTH {
4457
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4458
                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4459
                // `Group::WIDTH` is safe.
4460
                table
4461
                    .table
4462
                    .ctrl(0)
4463
                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4464
            } else {
4465
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control
4466
                // bytes, so copying `Group::WIDTH` bytes with offset equal
4467
                // to `self.buckets() == self.bucket_mask + 1` is safe
4468
                table
4469
                    .table
4470
                    .ctrl(0)
4471
                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4472
            }
4473
            drop(table);
4474
        }
4475
    }
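// ---- Illustrative sketch (editor's addition, not part of the measured source) ----
// The copies the test performs keep the invariant that a full group of
// control bytes can be loaded starting at ANY bucket index without wrapping:
// the `Group::WIDTH` trailing bytes mirror the head of the array. A safe
// scalar rendition of that fix-up (hypothetical helper; `ctrl.len()` equals
// `buckets + group_width`, matching `num_ctrl_bytes`):

fn mirror_trailing_ctrl_sketch(ctrl: &mut [u8], buckets: usize, group_width: usize) {
    debug_assert_eq!(ctrl.len(), buckets + group_width);
    if buckets < group_width {
        // Small table: replicate all `buckets` bytes right after the group.
        let (head, tail) = ctrl.split_at_mut(group_width);
        tail.copy_from_slice(&head[..buckets]);
    } else {
        // Large table: replicate the first `group_width` bytes past the end.
        let (head, tail) = ctrl.split_at_mut(buckets);
        tail.copy_from_slice(&head[..group_width]);
    }
}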
4476
4477
    /// CHECKING THAT A PANIC DURING `clone_from` DROPS EVERY ALREADY-CLONED
4478
    /// ELEMENT EXACTLY ONCE AND LEAVES THE DESTINATION TABLE EMPTY.
4479
    #[test]
4480
    fn test_catch_panic_clone_from() {
4481
        use super::{AllocError, Allocator, Global};
4482
        use ::alloc::sync::Arc;
4483
        use ::alloc::vec::Vec;
4484
        use core::sync::atomic::{AtomicI8, Ordering};
4485
        use std::thread;
4486
4487
        struct MyAllocInner {
4488
            drop_count: Arc<AtomicI8>,
4489
        }
4490
4491
        #[derive(Clone)]
4492
        struct MyAlloc {
4493
            _inner: Arc<MyAllocInner>,
4494
        }
4495
4496
        impl Drop for MyAllocInner {
4497
            fn drop(&mut self) {
4498
                println!("MyAlloc freed.");
4499
                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4500
            }
4501
        }
4502
4503
        unsafe impl Allocator for MyAlloc {
4504
            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4505
                let g = Global;
4506
                g.allocate(layout)
4507
            }
4508
4509
            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4510
                let g = Global;
4511
                g.deallocate(ptr, layout)
4512
            }
4513
        }
4514
4515
        const DISARMED: bool = false;
4516
        const ARMED: bool = true;
4517
4518
        struct CheckedCloneDrop {
4519
            panic_in_clone: bool,
4520
            dropped: bool,
4521
            need_drop: Vec<u64>,
4522
        }
4523
4524
        impl Clone for CheckedCloneDrop {
4525
            fn clone(&self) -> Self {
4526
                if self.panic_in_clone {
4527
                    panic!("panic in clone")
4528
                }
4529
                Self {
4530
                    panic_in_clone: self.panic_in_clone,
4531
                    dropped: self.dropped,
4532
                    need_drop: self.need_drop.clone(),
4533
                }
4534
            }
4535
        }
4536
4537
        impl Drop for CheckedCloneDrop {
4538
            fn drop(&mut self) {
4539
                if self.dropped {
4540
                    panic!("double drop");
4541
                }
4542
                self.dropped = true;
4543
            }
4544
        }
4545
4546
        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4547
4548
        let mut table = RawTable::new_in(MyAlloc {
4549
            _inner: Arc::new(MyAllocInner {
4550
                drop_count: dropped.clone(),
4551
            }),
4552
        });
4553
4554
        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4555
            let idx = idx as u64;
4556
            table.insert(
4557
                idx,
4558
                (
4559
                    idx,
4560
                    CheckedCloneDrop {
4561
                        panic_in_clone,
4562
                        dropped: false,
4563
                        need_drop: vec![idx],
4564
                    },
4565
                ),
4566
                |(k, _)| *k,
4567
            );
4568
        }
4569
4570
        assert_eq!(table.len(), 7);
4571
4572
        thread::scope(|s| {
4573
            let result = s.spawn(|| {
4574
                let armed_flags = [
4575
                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4576
                ];
4577
                let mut scope_table = RawTable::new_in(MyAlloc {
4578
                    _inner: Arc::new(MyAllocInner {
4579
                        drop_count: dropped.clone(),
4580
                    }),
4581
                });
4582
                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4583
                    let idx = idx as u64;
4584
                    scope_table.insert(
4585
                        idx,
4586
                        (
4587
                            idx,
4588
                            CheckedCloneDrop {
4589
                                panic_in_clone,
4590
                                dropped: false,
4591
                                need_drop: vec![idx + 100],
4592
                            },
4593
                        ),
4594
                        |(k, _)| *k,
4595
                    );
4596
                }
4597
                table.clone_from(&scope_table);
4598
            });
4599
            assert!(result.join().is_err());
4600
        });
4601
4602
        // Check that all iterators yield no elements
4603
        // (especially `RawIterRange`, which does not depend on the number of
4604
        // elements in the table, but looks directly at the control bytes)
4605
        //
4606
        // SAFETY: We know for sure that `RawTable` will outlive
4607
        // the returned `RawIter / RawIterRange` iterator.
4608
        assert_eq!(table.len(), 0);
4609
        assert_eq!(unsafe { table.iter().count() }, 0);
4610
        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4611
4612
        for idx in 0..table.buckets() {
4613
            let idx = idx as u64;
4614
            assert!(
4615
                table.find(idx, |(k, _)| *k == idx).is_none(),
4616
                "Index: {idx}"
4617
            );
4618
        }
4619
4620
        // Only the outer table's allocator should still be alive; the scoped table's allocator was dropped during the panic.
4621
        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4622
    }
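// ---- Illustrative sketch (editor's addition, not part of the measured source) ----
// The property the test above demands of `clone_from` - a panic mid-clone
// drops every already-cloned element exactly once and leaves the destination
// empty - is the same one `Vec` gives for free during unwinding. A tiny
// illustration of that invariant (hypothetical helper):

fn clone_with_unwind_safety_sketch<T: Clone>(src: &[T]) -> ::alloc::vec::Vec<T> {
    let mut out = ::alloc::vec::Vec::with_capacity(src.len());
    for item in src {
        // If `clone` panics here, `out` is dropped during unwinding and
        // every already-cloned element is dropped exactly once.
        out.push(item.clone());
    }
    out
}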
4623
}