Coverage Report

Created: 2025-07-01 06:04

/rust/registry/src/index.crates.io-6f17d22bba15001f/hashbrown-0.15.4/src/raw/mod.rs
use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::control::{BitMaskIter, Group, Tag, TagSliceExt};
use crate::scopeguard::{guard, ScopeGuard};
use crate::util::{invalid_mut, likely, unlikely};
use crate::TryReserveError;
use core::array;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use core::slice;
use core::{hint, ptr};

mod alloc;
#[cfg(test)]
pub(crate) use self::alloc::AllocError;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};

#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
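The enum above lets one slow path serve both `reserve` (which panics or aborts on failure) and `try_reserve` (which reports failure as a value). A minimal standalone sketch of the same pattern, with illustrative names and a plain `Result` standing in for `TryReserveError`:

#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

// One error site, two behaviors: the caller chooses by threading the flag.
fn capacity_overflow(fallibility: Fallibility) -> Result<(), &'static str> {
    match fallibility {
        Fallibility::Fallible => Err("capacity overflow"),
        Fallibility::Infallible => panic!("Hash table capacity overflow"),
    }
}

fn main() {
    // The fallible entry point gets the failure back as a value...
    assert!(capacity_overflow(Fallibility::Fallible).is_err());
    // ...while the infallible entry point would panic on the same input.
}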

trait SizedTypeProperties: Sized {
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}

impl<T> SizedTypeProperties for T {}

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}
Unexecuted instantiation: hashbrown::raw::h1

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
#[derive(Clone)]
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
Unexecuted instantiation: <hashbrown::raw::ProbeSeq>::move_next
}
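Why every group gets visited: after i steps the probe sits at pos + T_i * Group::WIDTH (mod buckets), where T_i = i(i+1)/2 is the i-th triangular number, and triangular numbers mod a power of two are a permutation of the residues (the proof linked above). A standalone sketch that checks this exhaustively for one configuration (the group width and table size are assumptions, not hashbrown API):

// Standalone check: the triangular probe touches every group exactly once.
fn main() {
    const GROUP_WIDTH: usize = 16; // assumed Group::WIDTH (x86_64 with SSE2)
    const BUCKETS: usize = 256; // must be a power of two
    let bucket_mask = BUCKETS - 1;

    // The real probe starts at `h1(hash) & bucket_mask`; starting at 0 only
    // shifts the whole sequence, so coverage is unaffected.
    let mut pos = 0usize;
    let mut stride = 0usize;
    let mut seen = [false; BUCKETS / GROUP_WIDTH];

    for _ in 0..BUCKETS / GROUP_WIDTH {
        assert!(!seen[pos / GROUP_WIDTH], "group visited twice");
        seen[pos / GROUP_WIDTH] = true;
        // Exactly the update performed by ProbeSeq::move_next.
        stride += GROUP_WIDTH;
        pos += stride;
        pos &= bucket_mask;
    }
    assert!(seen.iter().all(|&v| v), "some group was never probed");
}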

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize, table_layout: TableLayout) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    if cap < 15 {
        // Consider a small TableLayout like { size: 1, ctrl_align: 16 } on a
        // platform with Group::WIDTH of 16 (like x86_64 with SSE2). For small
        // bucket sizes, this ends up wasting quite a few bytes just to pad to
        // the relatively larger ctrl_align:
        //
        // | capacity | buckets | bytes allocated | bytes per item |
        // | -------- | ------- | --------------- | -------------- |
        // |        3 |       4 |              36 | (Yikes!)  12.0 |
        // |        7 |       8 |              40 | (Poor)     5.7 |
        // |       14 |      16 |              48 |            3.4 |
        // |       28 |      32 |              80 |            3.3 |
        //
        // In general, buckets * table_layout.size >= table_layout.ctrl_align
        // must be true to avoid these edges. This is implemented by adjusting
        // the minimum capacity upwards for small items. This code only needs
        // to handle ctrl_align which are less than or equal to Group::WIDTH,
        // because valid layout sizes are always a multiple of the alignment,
        // so anything with alignment over the Group::WIDTH won't hit this edge
        // case.

        // This is brittle, e.g. if we ever add 32 byte groups, it will select
        // 3 regardless of the table_layout.size.
        let min_cap = match (Group::WIDTH, table_layout.size) {
            (16, 0..=1) => 14,
            (16, 2..=3) => 7,
            (8, 0..=1) => 7,
            _ => 3,
        };
        let cap = min_cap.max(cap);
        // We don't bother with a table size of 2 buckets since that can only
        // hold a single element. Instead, we skip directly to a 4 bucket table
        // which can hold 3 elements.
        return Some(if cap < 4 {
            4
        } else if cap < 8 {
            8
        } else {
            16
        });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
Unexecuted instantiation: hashbrown::raw::capacity_to_buckets

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
Unexecuted instantiation: hashbrown::raw::bucket_mask_to_capacity
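A worked round trip of the two functions above for the large-table path (a self-contained sketch that restates them without the small-table special cases): requesting room for 100 items inflates the request by 8/7 to 114, rounds up to 128 buckets, and 7/8 of those buckets (112) remain usable, which still covers the original request.

fn capacity_to_buckets_large(cap: usize) -> Option<usize> {
    debug_assert!(cap >= 15); // smaller requests take the lookup-table path
    let adjusted_cap = cap.checked_mul(8)? / 7;
    Some(adjusted_cap.next_power_of_two())
}

fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        bucket_mask
    } else {
        ((bucket_mask + 1) / 8) * 7
    }
}

fn main() {
    let buckets = capacity_to_buckets_large(100).unwrap();
    assert_eq!(buckets, 128); // 100 * 8 / 7 = 114, next power of two = 128
    assert_eq!(bucket_mask_to_capacity(buckets - 1), 112); // 128 / 8 * 7
    assert!(112 >= 100); // the guarantee: usable capacity covers the request
}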

/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T`
/// while keeping the rest of `calculate_layout_for` independent of `T`.
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    const fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: if layout.align() > Group::WIDTH {
                layout.align()
            } else {
                Group::WIDTH
            },
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        // We need an additional check to ensure that the allocation doesn't
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
        if len > isize::MAX as usize - (ctrl_align - 1) {
            return None;
        }

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
Unexecuted instantiation: <hashbrown::raw::TableLayout>::calculate_layout_for
}
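A standalone sketch of the same computation (mirroring `calculate_layout_for`, but with a group width of 16 assumed rather than taken from the real per-target `Group`): the data array comes first, the end of it is rounded up to `ctrl_align`, and `Group::WIDTH` extra control bytes are appended so that group-sized loads near the end of the table stay in bounds.

use std::alloc::Layout;

fn layout_for<T>(buckets: usize) -> Option<(Layout, usize)> {
    const GROUP_WIDTH: usize = 16; // assumption; hashbrown derives this per target
    let layout = Layout::new::<T>();
    let ctrl_align = layout.align().max(GROUP_WIDTH);
    // Round the end of the data array up to the control-byte alignment.
    let ctrl_offset =
        layout.size().checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
    // One control byte per bucket, plus GROUP_WIDTH trailing control bytes.
    let len = ctrl_offset.checked_add(buckets + GROUP_WIDTH)?;
    if len > isize::MAX as usize - (ctrl_align - 1) {
        return None;
    }
    Some((Layout::from_size_align(len, ctrl_align).ok()?, ctrl_offset))
}

fn main() {
    // 8 buckets of u64: 64 data bytes (already 16-aligned), control bytes
    // start at offset 64, and 8 + 16 of them yield an 88-byte allocation.
    let (layout, ctrl_offset) = layout_for::<u64>(8).unwrap();
    assert_eq!(ctrl_offset, 64);
    assert_eq!(layout.size(), 88);
    assert_eq!(layout.align(), 16);
}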

/// A reference to an empty bucket into which an element can be inserted.
pub struct InsertSlot {
    index: usize,
}

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually this is a pointer to one past the element rather than to the
    // element itself: this is needed to maintain pointer arithmetic invariants,
    // since keeping a direct pointer to the element introduces difficulty.
    // We use `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[inline]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    /// Creates a [`Bucket`] that contains a pointer to the data.
    /// The pointer calculation is performed by calculating the
    /// offset from the given `base` pointer (convenience for
    /// `base.as_ptr().sub(index)`).
    ///
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// If the `T` is a ZST, then we instead track the index of the element
    /// in the table so that `erase` works properly (return
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of the [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and the [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the `base` pointer must not be `dangling` and must point to the
    ///   end of the first `value element` from the `data part` of the table, i.e.
    ///   must be the pointer returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    ///   must be no greater than the number returned by the function
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    /// must be no greater than the number returned by the function
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
        // the data part of the table (we start counting from "0", so that
        // in the expression T[last], the "last" index is actually one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
        //
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
        //                   points here in the data part of the table
        //                   (to the start of T1)
        //                        |
        //                        |        `base: NonNull<T>` must point here
        //                        |         (to the end of T0 or to the start of C0)
        //                        v         v
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
        //                           ^
        //                           `from_base_index(base, 1)` returns a pointer
        //                           that points here in the data part of the table
        //                           (to the end of T1)
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
        // or metadata for data.
        let ptr = if T::IS_ZERO_SIZED {
            // won't overflow because index must be less than length (bucket_mask)
            // and bucket_mask is guaranteed to be less than `isize::MAX`
            // (see TableLayout::calculate_layout_for method)
            invalid_mut(index + 1)
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::from_base_index
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::from_base_index
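For the ZST branch there is no real address to compute; the index itself is smuggled through the pointer. A tiny standalone illustration of the `index + 1` encoding (the `+ 1` keeps the value non-null, as `NonNull` requires):

fn main() {
    let index = 5usize;
    // from_base_index for a ZST: encode the index in the address itself.
    let ptr = (index + 1) as *mut ();
    assert!(!ptr.is_null());
    // to_base_index inverts the encoding without any pointer arithmetic.
    assert_eq!(ptr as usize - 1, index);
}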

    /// Calculates the index of a [`Bucket`] as the distance between two pointers
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
    /// The returned value is in units of T: the distance in bytes divided by
    /// [`core::mem::size_of::<T>()`].
    ///
    /// If the `T` is a ZST, then we return the index of the element in
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
    ///
    /// This function is the inverse of [`from_base_index`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*const T>::offset_from`] method of `*const T`.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*const T>::offset_from`]
    /// method, as well as for the correct logic of the work of this crate, the
    /// following rules are necessary and sufficient:
    ///
    /// * the pointer contained in `base` must not be `dangling` and must point to the
    ///   end of the first `element` from the `data part` of the table, i.e.
    ///   must be a pointer returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `self` also must not contain a dangling pointer;
    ///
    /// * both `self` and `base` must be created from the same [`RawTable`]
    ///   (or [`RawTableInner`]).
    ///
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTableInner`]: RawTableInner
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
    #[inline]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        // If mem::size_of::<T>() != 0 then return the index under which we used to store the
        // `element` in the data part of the table (we start counting from "0", so
        // that in the expression T[last], the "last" index actually is one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
        // For example, for the 5th element in the table, the calculation is performed like this:
        //
        //                        mem::size_of::<T>()
        //                          |
        //                          |         `self = from_base_index(base, 5)` that returns a pointer
        //                          |         that points here in the data part of the table
        //                          |         (to the end of T5)
        //                          |           |                    `base: NonNull<T>` must point here
        //                          v           |                    (to the end of T0 or to the start of C0)
        //                        /???\         v                      v
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
        //                                      \__________  __________/
        //                                                 \/
        //                                     `bucket.to_base_index(base)` = 5
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
        if T::IS_ZERO_SIZED {
            // this cannot be UB
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
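The pointer arithmetic behind `from_base_index`, `to_base_index`, and `as_ptr` can be exercised with an ordinary array. A standalone sketch (a plain `u32` array stands in for the table's back-to-front data part; nothing here is hashbrown's API):

fn main() {
    // The element for bucket `i` lives at storage[3 - i]: back to front.
    let storage: [u32; 4] = [3, 2, 1, 0];
    // `base` plays the role of RawTable::data_end(): one past element 0.
    let base = unsafe { storage.as_ptr().add(storage.len()) };

    for index in 0..storage.len() {
        // from_base_index: the bucket pointer is base - index,
        // which points one past T_index.
        let bucket_ptr = unsafe { base.sub(index) };
        // as_ptr: the element itself starts one T earlier.
        assert_eq!(unsafe { *bucket_ptr.sub(1) } as usize, index);
        // to_base_index recovers the index as a pointer distance.
        assert_eq!(unsafe { base.offset_from(bucket_ptr) } as usize, index);
    }
}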

    /// Acquires the underlying raw pointer `*mut T` to `data`.
    ///
    /// # Note
    ///
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
    /// for properly dropping the data we also need to clear the `data` control bytes. If we
    /// drop the data but do not clear the `data control byte`, it leads to a double drop when
    /// [`RawTable`] goes out of scope.
    ///
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
    /// will not re-evaluate where the new value should go, meaning the value may become
    /// "lost" if its location does not reflect its state.
    ///
    /// [`RawTable`]: crate::raw::RawTable
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        if T::IS_ZERO_SIZED {
            // Just return an arbitrary ZST pointer which is properly aligned;
            // an invalid pointer is good enough for a ZST.
            invalid_mut(mem::align_of::<T>())
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ptr
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ptr

    /// Acquires the underlying non-null pointer `*mut T` to `data`.
    #[inline]
    fn as_non_null(&self) -> NonNull<T> {
        // SAFETY: `self.ptr` is already a `NonNull`
        unsafe { NonNull::new_unchecked(self.as_ptr()) }
    }

    /// Creates a new [`Bucket`] that is offset from `self` by the given
    /// `offset`. The pointer calculation is performed by calculating the
    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
    /// This function is used for iterators.
    ///
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of the [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and the [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the pointer contained in `self` must not be `dangling`;
    ///
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if T::IS_ZERO_SIZED {
            // an invalid pointer is good enough for a ZST
            invalid_mut(self.ptr.as_ptr() as usize + offset)
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    /// Executes the destructor (if any) of the pointed-to `data`.
    ///
    /// # Safety
    ///
    /// See [`ptr::drop_in_place`] for safety concerns.
    ///
    /// You should use [`RawTable::erase`] instead of this function,
    /// or be careful when calling this function directly, because for
    /// properly dropping the data we also need to clear the `data` control bytes.
    /// If we drop the data but do not erase the `data control byte`, it leads to
    /// a double drop when [`RawTable`] goes out of scope.
    ///
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    /// Reads the `value` from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// See [`ptr::read`] for safety concerns.
    ///
    /// You should use [`RawTable::remove`] instead of this function,
    /// or be careful when calling this function directly, because the compiler
    /// calls the destructor when the read `value` goes out of scope. This
    /// can cause a double drop when [`RawTable`] goes out of scope,
    /// because the `data control byte` was not erased.
    ///
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
    #[inline]
    pub(crate) unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    /// Overwrites a memory location with the given `value` without reading
    /// or dropping the old value (like the [`ptr::write`] function).
    ///
    /// # Safety
    ///
    /// See [`ptr::write`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub(crate) unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::write
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::write

    /// Returns a shared immutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_ref`] for safety concerns.
    ///
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
    #[inline]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
Unexecuted instantiation: <hashbrown::raw::Bucket<usize>>::as_ref
Unexecuted instantiation: <hashbrown::raw::Bucket<_>>::as_ref

    /// Returns a unique mutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_mut`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
}

/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator = Global> {
    table: RawTableInner,
    alloc: A,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T_n, ..., T1, T0, C0, C1, ...
    //                              ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,
}

impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc: Global,
            marker: PhantomData,
        }
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::new
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::new

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<_>>::with_capacity
}
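The zero-allocation promise above is observable through the public types built on this table, for instance std's HashMap (which wraps hashbrown):

use std::collections::HashMap;

fn main() {
    // `new` performs no allocation: the table starts with capacity 0 and
    // only allocates on the first insert.
    let mut map: HashMap<u32, u32> = HashMap::new();
    assert_eq!(map.capacity(), 0);
    map.insert(1, 1);
    assert!(map.capacity() >= 1);
}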

impl<T, A: Allocator> RawTable<T, A> {
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();

    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    #[cfg_attr(feature = "rustc-dep-of-std", rustc_const_stable_indirect)]
    pub const fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(
                &alloc,
                Self::TABLE_LAYOUT,
                buckets,
                fallibility,
            )?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self {
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
            alloc,
            marker: PhantomData,
        }
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::with_capacity_in
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::with_capacity_in

    /// Returns a reference to the underlying allocator.
    #[inline]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns a pointer to one past the last `data` element in the table as viewed from
    /// the start point of the allocation.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub fn data_end(&self) -> NonNull<T> {
        //                        `self.table.ctrl.cast()` returns a pointer that
        //                        points here (to the end of `T0`)
        //                          ∨
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
        //                           \________  ________/
        //                                    \/
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`.
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
        //                        with loading `Group` bytes from the heap works properly, even if the result
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
        //                        `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
        self.table.ctrl.cast()
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::data_end
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::data_end

    /// Returns a pointer to the start of the data table.
    #[inline]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
    }

    /// Returns the total amount of memory allocated internally by the hash
    /// table, in bytes.
    ///
    /// The returned number is informational only. It is intended to be
    /// primarily used for memory profiling.
    #[inline]
    pub fn allocation_size(&self) -> usize {
        // SAFETY: We use the same `table_layout` that was used to allocate
        // this table.
        unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) }
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[inline]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
    /// following safety rules:
    ///
    /// * The table must already be allocated;
    ///
    /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
    ///   function, i.e. `(index + 1) <= self.buckets()`.
    ///
    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
    /// `(index + 1) <= self.buckets()`.
    ///
    /// [`RawTable::buckets`]: RawTable::buckets
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
        //
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
        //                  |
        //                  |               `base = self.data_end()` points here
        //                  |               (to the start of CT0 or to the end of T0)
        //                  v                 v
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
        //                     ^                                              \__________  __________/
        //        `table.bucket(3)` returns a pointer that points                        \/
        //         here in the `data` part of the `RawTable` (to              additional control bytes
        //         the end of T3)                                              `m = Group::WIDTH - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`;
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::bucket
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::bucket

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index);
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Removes an element from the table, returning it.
    ///
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
        self.erase_no_drop(&item);
        (
            item.read(),
            InsertSlot {
                index: self.bucket_index(&item),
            },
        )
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
            None => None,
        }
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop();
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        if self.is_empty() {
            // Special case empty table to avoid surprising O(capacity) time.
            return;
        }
        // Ensure that the table is reset even if one of the drops panics
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
            // even in case of panic during the dropping of the elements so
            // that there will be no double drop of the elements.
            self_.table.drop_elements::<T>();
        }
    }
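The guard pattern used by `clear` can be sketched without the crate-private `scopeguard` module. A minimal hand-rolled equivalent (illustrative only): the cleanup closure runs even if an element's destructor panics, so the table never believes it still owns already-dropped elements.

struct Guard<F: FnMut()>(F);

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        (self.0)(); // runs on normal exit and during unwinding alike
    }
}

fn main() {
    let mut cleared = false;
    {
        let _reset = Guard(|| cleared = true);
        // ... element destructors would run here; a panic would still
        // trigger the guard before the table goes out of scope ...
    }
    assert!(cleared);
}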

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.table.items, min_size);
        if min_size == 0 {
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
            unsafe {
                // SAFETY:
                // 1. We call the function only once;
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                //    and [`TableLayout`] that were used to allocate this table.
                // 3. If any elements' drop function panics, then there will only be a memory leak,
                //    because we have replaced the inner table with a new one.
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
            }
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size, Self::TABLE_LAYOUT) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.table.items == 0 {
                let new_inner =
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
                let mut old_inner = mem::replace(&mut self.table, new_inner);
                unsafe {
                    // SAFETY:
                    // 1. We call the function only once;
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
                    //    and [`TableLayout`] that were used to allocate this table.
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
                    //    because we have replaced the inner table with a new one.
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
                }
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                unsafe {
                    // SAFETY:
                    // 1. We know for sure that `min_size >= self.table.items`.
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
                    //    we will never expose RawTable::new_uninitialized in a public API.
                    if self
                        .resize(min_size, hasher, Fallibility::Infallible)
                        .is_err()
                    {
                        // SAFETY: The result of calling the `resize` function cannot be an error
                        // because `fallibility == Fallibility::Infallible`.
                        hint::unreachable_unchecked()
                    }
                }
            }
        }
    }
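The `max(self.len(), min_size)` clamp is visible through the public API, for example with std's HashMap (which wraps this routine):

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::with_capacity(100);
    map.extend((0..10).map(|i| (i, i)));
    // Asking to shrink below len() clamps to len(): the 10 live entries
    // must still fit, so capacity stays at least 10.
    map.shrink_to(4);
    assert!(map.capacity() >= 10);
}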

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if unlikely(additional > self.table.growth_left) {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            unsafe {
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
                if self
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
                    hint::unreachable_unchecked()
                }
            }
        }
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<gimli::write::cfi::CommonInformationEntry, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<gimli::write::loc::LocationList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<gimli::write::line::LineString, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<gimli::write::range::RangeList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<gimli::write::abbrev::Abbreviation, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve::<indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve::<_>

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.table.growth_left {
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
        } else {
            Ok(())
        }
    }
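Seen from the public API, `reserve` and `try_reserve` are the two `Fallibility` paths of the same slow path. With std's HashMap:

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = HashMap::new();
    // The Fallible path: failure comes back as a TryReserveError value.
    map.try_reserve(10).expect("small reservation should succeed");
    assert!(map.try_reserve(usize::MAX).is_err()); // capacity overflow
    // The Infallible path (`reserve`) would panic or abort on the same input.
}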

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[cold]
    #[inline(never)]
    unsafe fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            // SAFETY:
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
            //    [`TableLayout`] that were used to allocate this table.
            // 2. The `drop` function is the actual drop function of the elements stored in
            //    the table.
            // 3. The caller ensures that the control bytes of the `RawTableInner`
            //    are already initialized.
            self.table.reserve_rehash_inner(
                &self.alloc,
                additional,
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
                fallibility,
                Self::TABLE_LAYOUT,
                if T::NEEDS_DROP {
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
                } else {
                    None
                },
            )
        }
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::cfi::CommonInformationEntry, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::loc::LocationList, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::line::LineString, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::range::RangeList, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::abbrev::Abbreviation, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::cfi::CommonInformationEntry, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::loc::LocationList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::line::LineString, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::range::RangeList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<gimli::write::abbrev::Abbreviation, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::reserve_rehash::<indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::reserve_rehash::<_>

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    ///
    /// # Safety
    ///
    /// The [`RawTableInner`] must have properly initialized control bytes,
    /// otherwise calling this function results in [`undefined behavior`].
    ///
    /// The caller of this function must ensure that `capacity >= self.table.items`,
    /// otherwise:
    ///
    /// * If `self.table.items != 0`, calling this function with `capacity`
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
    ///
    /// * If `self.table.items > capacity_to_buckets(capacity, Self::TABLE_LAYOUT)`,
    ///   this function will never return (it will loop infinitely).
    ///
    /// See [`RawTableInner::find_insert_slot`] for more information.
    ///
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    unsafe fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // SAFETY:
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
        //    [`TableLayout`] that were used to allocate this table.
        // 3. The caller ensures that the control bytes of the `RawTableInner`
        //    are already initialized.
        self.table.resize_inner(
            &self.alloc,
            capacity,
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
            fallibility,
            Self::TABLE_LAYOUT,
        )
    }
1047
1048
    /// Inserts a new element into the table, and returns its raw bucket.
1049
    ///
1050
    /// This does not check if the given element already exists in the table.
1051
    #[cfg_attr(feature = "inline-more", inline)]
1052
0
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1053
0
        unsafe {
1054
0
            // SAFETY:
1055
0
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1056
0
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1057
0
            //
1058
0
            // 2. We reserve additional space (if necessary) right after calling this function.
1059
0
            let mut slot = self.table.find_insert_slot(hash);
1060
0
1061
0
            // We can avoid growing the table once we have reached our load factor if we are replacing
1062
0
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1063
0
            //
1064
0
            // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
1065
0
            // in the range `0..=self.buckets()`.
1066
0
            let old_ctrl = *self.table.ctrl(slot.index);
1067
0
            if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) {
1068
0
                self.reserve(1, hasher);
1069
0
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1070
0
                // initialized and that there is extra space in the table.
1071
0
                slot = self.table.find_insert_slot(hash);
1072
0
            }
1073
1074
0
            self.insert_in_slot(hash, slot, value)
1075
0
        }
1076
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert::<indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert::<_>
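
As a usage sketch (this raw module is internal to the crate, so the `make_hash` helper and the wrapper below are illustrative assumptions, written as if inside this module): the `hasher` closure passed to `insert` is only invoked when the insertion forces a grow and the existing elements must be rehashed.

use core::hash::{BuildHasher, Hash, Hasher};

// Hypothetical helper: hash a key with the map's hash-builder.
fn make_hash<S: BuildHasher, K: Hash + ?Sized>(state: &S, key: &K) -> u64 {
    let mut h = state.build_hasher();
    key.hash(&mut h);
    h.finish()
}

// Hypothetical map layer storing (key, value) pairs in a RawTable.
fn insert_pair<K: Hash + Eq, V, S: BuildHasher>(
    table: &mut RawTable<(K, V)>,
    state: &S,
    key: K,
    value: V,
) {
    let hash = make_hash(state, &key);
    // `insert` does not check for an existing equal key; callers that need
    // upsert semantics should use `find_or_find_insert_slot` instead.
    table.insert(hash, (key, value), |pair| make_hash(state, &pair.0));
}
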
1077
1078
    /// Inserts a new element into the table, and returns a mutable reference to it.
1079
    ///
1080
    /// This does not check if the given element already exists in the table.
1081
    #[cfg_attr(feature = "inline-more", inline)]
1082
0
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1083
0
        unsafe { self.insert(hash, value, hasher).as_mut() }
1084
0
    }
1085
1086
    /// Inserts a new element into the table, without growing the table.
1087
    ///
1088
    /// There must be enough space in the table to insert the new element.
1089
    ///
1090
    /// This does not check if the given element already exists in the table.
1091
    #[cfg_attr(feature = "inline-more", inline)]
1092
    #[cfg(feature = "rustc-internal-api")]
1093
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1094
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1095
        let bucket = self.table.bucket(index);
1096
1097
        // If we are replacing a DELETED entry then we don't need to update
1098
        // the load counter.
1099
        self.table.growth_left -= old_ctrl.special_is_empty() as usize;
1100
1101
        bucket.write(value);
1102
        self.table.items += 1;
1103
        bucket
1104
    }
1105
1106
    /// Temporarily removes a bucket, applying the given function to the removed
1107
    /// element, optionally putting the returned value back in the same bucket.
1108
    ///
1109
    /// Returns `true` if the bucket still contains an element.
1110
    ///
1111
    /// This does not check if the given bucket is actually occupied.
1112
    #[cfg_attr(feature = "inline-more", inline)]
1113
0
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1114
0
    where
1115
0
        F: FnOnce(T) -> Option<T>,
1116
0
    {
1117
0
        let index = self.bucket_index(&bucket);
1118
0
        let old_ctrl = *self.table.ctrl(index);
1119
0
        debug_assert!(self.is_bucket_full(index));
1120
0
        let old_growth_left = self.table.growth_left;
1121
0
        let item = self.remove(bucket).0;
1122
0
        if let Some(new_item) = f(item) {
1123
0
            self.table.growth_left = old_growth_left;
1124
0
            self.table.set_ctrl(index, old_ctrl);
1125
0
            self.table.items += 1;
1126
0
            self.bucket(index).write(new_item);
1127
0
            true
1128
        } else {
1129
0
            false
1130
        }
1131
0
    }
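
A hedged sketch of the remove-or-put-back pattern this enables (the refcounting scheme is purely illustrative): the closure consumes the element and decides whether the bucket stays occupied.

// Hypothetical: entries are (key, refcount); drop an entry only when its
// count reaches zero. `bucket` must point at a full bucket of this table.
unsafe fn release(table: &mut RawTable<(u64, usize)>, bucket: Bucket<(u64, usize)>) -> bool {
    table.replace_bucket_with(bucket, |(key, refcount)| {
        if refcount > 1 {
            Some((key, refcount - 1)) // put the updated element back
        } else {
            None // leave the bucket empty; the element is dropped
        }
    })
}
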
1132
1133
    /// Searches for an element in the table. If the element is not found,
1134
    /// returns `Err` with the position of a slot where an element with the
1135
    /// same hash could be inserted.
1136
    ///
1137
    /// This function may resize the table if additional space is required for
1138
    /// inserting an element.
1139
    #[inline]
1140
0
    pub fn find_or_find_insert_slot(
1141
0
        &mut self,
1142
0
        hash: u64,
1143
0
        mut eq: impl FnMut(&T) -> bool,
1144
0
        hasher: impl Fn(&T) -> u64,
1145
0
    ) -> Result<Bucket<T>, InsertSlot> {
1146
0
        self.reserve(1, hasher);
1147
0
1148
0
        unsafe {
1149
0
            // SAFETY:
1150
0
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1151
0
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1152
0
            //    never expose `RawTable::new_uninitialized` in a public API.
1153
0
            // 3. The `find_or_find_insert_slot_inner` function only returns the `index` of a full bucket,
1154
0
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1155
0
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1156
0
            match self
1157
0
                .table
1158
0
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::cfi::CommonInformationEntry, (), gimli::write::cfi::CommonInformationEntry>::{closure#0}, indexmap::map::core::get_hash<gimli::write::cfi::CommonInformationEntry, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::loc::LocationList, (), gimli::write::loc::LocationList>::{closure#0}, indexmap::map::core::get_hash<gimli::write::loc::LocationList, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::line::LineString, (), gimli::write::line::LineString>::{closure#0}, indexmap::map::core::get_hash<gimli::write::line::LineString, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::range::RangeList, (), gimli::write::range::RangeList>::{closure#0}, indexmap::map::core::get_hash<gimli::write::range::RangeList, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::abbrev::Abbreviation, (), gimli::write::abbrev::Abbreviation>::{closure#0}, indexmap::map::core::get_hash<gimli::write::abbrev::Abbreviation, ()>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo, (gimli::write::line::LineString, gimli::write::line::DirectoryId)>::{closure#0}, indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_slot::<_, _>::{closure#0}
1159
            {
1160
                // SAFETY: See explanation above.
1161
0
                Ok(index) => Ok(self.bucket(index)),
1162
0
                Err(slot) => Err(slot),
1163
            }
1164
        }
1165
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::cfi::CommonInformationEntry, (), gimli::write::cfi::CommonInformationEntry>::{closure#0}, indexmap::map::core::get_hash<gimli::write::cfi::CommonInformationEntry, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::loc::LocationList, (), gimli::write::loc::LocationList>::{closure#0}, indexmap::map::core::get_hash<gimli::write::loc::LocationList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::line::LineString, (), gimli::write::line::LineString>::{closure#0}, indexmap::map::core::get_hash<gimli::write::line::LineString, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::range::RangeList, (), gimli::write::range::RangeList>::{closure#0}, indexmap::map::core::get_hash<gimli::write::range::RangeList, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<gimli::write::abbrev::Abbreviation, (), gimli::write::abbrev::Abbreviation>::{closure#0}, indexmap::map::core::get_hash<gimli::write::abbrev::Abbreviation, ()>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find_or_find_insert_slot::<indexmap::map::core::equivalent<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo, (gimli::write::line::LineString, gimli::write::line::DirectoryId)>::{closure#0}, indexmap::map::core::get_hash<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find_or_find_insert_slot::<_, _>
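
Together with `insert_in_slot` below, this supports an entry-style upsert that hashes and probes only once. A minimal sketch, reusing the hypothetical `make_hash` helper and imports from the earlier example:

fn upsert<K: Hash + Eq, V, S: BuildHasher>(
    table: &mut RawTable<(K, V)>,
    state: &S,
    key: K,
    value: V,
) {
    let hash = make_hash(state, &key);
    match table.find_or_find_insert_slot(
        hash,
        |pair| pair.0 == key,             // eq: compare stored keys
        |pair| make_hash(state, &pair.0), // hasher: used only on resize
    ) {
        // Key already present: overwrite the value in place.
        Ok(bucket) => unsafe { bucket.as_mut().1 = value },
        // Absent: the slot stays valid because the table has not been
        // mutated between the two calls, as the safety contract requires.
        Err(slot) => unsafe {
            table.insert_in_slot(hash, slot, (key, value));
        },
    }
}
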
1166
1167
    /// Inserts a new element into the table in the given slot, and returns its
1168
    /// raw bucket.
1169
    ///
1170
    /// # Safety
1171
    ///
1172
    /// `slot` must point to a slot previously returned by
1173
    /// `find_or_find_insert_slot`, and no mutation of the table must have
1174
    /// occurred since that call.
1175
    #[inline]
1176
0
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1177
0
        let old_ctrl = *self.table.ctrl(slot.index);
1178
0
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1179
0
1180
0
        let bucket = self.bucket(slot.index);
1181
0
        bucket.write(value);
1182
0
        bucket
1183
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::insert_in_slot
1184
1185
    /// Searches for an element in the table.
1186
    #[inline]
1187
0
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1188
0
        unsafe {
1189
0
            // SAFETY:
1190
0
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1191
0
            //    will never expose `RawTable::new_uninitialized` in a public API.
1192
0
            // 2. The `find_inner` function only returns the `index` of a full bucket, which is in
1193
0
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1194
0
            //    is safe.
1195
0
            let result = self
1196
0
                .table
1197
0
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo, (gimli::write::line::LineString, gimli::write::line::DirectoryId)>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>::{closure#0}
1198
0
1199
0
            // Avoid `Option::map` because it bloats LLVM IR.
1200
0
            match result {
1201
                // SAFETY: See explanation above.
1202
0
                Some(index) => Some(self.bucket(index)),
1203
0
                None => None,
1204
            }
1205
        }
1206
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::find::<indexmap::map::core::equivalent<(gimli::write::line::LineString, gimli::write::line::DirectoryId), gimli::write::line::FileInfo, (gimli::write::line::LineString, gimli::write::line::DirectoryId)>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::find::<_>
1207
1208
    /// Gets a reference to an element in the table.
1209
    #[inline]
1210
0
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1211
0
        // Avoid `Option::map` because it bloats LLVM IR.
1212
0
        match self.find(hash, eq) {
1213
0
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1214
0
            None => None,
1215
        }
1216
0
    }
1217
1218
    /// Gets a mutable reference to an element in the table.
1219
    #[inline]
1220
0
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1221
0
        // Avoid `Option::map` because it bloats LLVM IR.
1222
0
        match self.find(hash, eq) {
1223
0
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1224
0
            None => None,
1225
        }
1226
0
    }
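
A short sketch of the lookup path (again with the hypothetical `make_hash` helper): the table never hashes or compares keys itself, so `find`/`get`/`get_mut` all take the precomputed hash plus an equality closure.

fn get_value<'t, K: Hash + Eq, V, S: BuildHasher>(
    table: &'t RawTable<(K, V)>,
    state: &S,
    key: &K,
) -> Option<&'t V> {
    let hash = make_hash(state, key);
    // The eq closure only runs for buckets whose 7-bit control tag already
    // matches this hash, so full key comparisons are rare.
    table.get(hash, |pair| pair.0 == *key).map(|pair| &pair.1)
}
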
1227
1228
    /// Attempts to get mutable references to `N` entries in the table at once.
1229
    ///
1230
    /// Returns an array of length `N` with the results of each query.
1231
    ///
1232
    /// At most one mutable reference will be returned to any entry: this method panics if two
1233
    /// of the lookups resolve to the same entry. `None` is returned for each hash that is not found.
1234
    ///
1235
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1236
    /// the `i`th key to be looked up.
1237
0
    pub fn get_many_mut<const N: usize>(
1238
0
        &mut self,
1239
0
        hashes: [u64; N],
1240
0
        eq: impl FnMut(usize, &T) -> bool,
1241
0
    ) -> [Option<&'_ mut T>; N] {
1242
0
        unsafe {
1243
0
            let ptrs = self.get_many_mut_pointers(hashes, eq);
1244
1245
0
            for (i, cur) in ptrs.iter().enumerate() {
1246
0
                if cur.is_some() && ptrs[..i].contains(cur) {
1247
0
                    panic!("duplicate keys found");
1248
0
                }
1249
            }
1250
            // All buckets are distinct from all previous buckets, so we're clear to return the result
1251
            // of the lookup.
1252
1253
0
            ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1254
0
        }
1255
0
    }
1256
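    /// Variant of [`get_many_mut`] that does not check for duplicates.
    ///
    /// # Safety
    ///
    /// If two of the lookups resolve to the same entry, this function
    /// returns two aliasing mutable references to it, which is undefined
    /// behavior.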
1257
0
    pub unsafe fn get_many_unchecked_mut<const N: usize>(
1258
0
        &mut self,
1259
0
        hashes: [u64; N],
1260
0
        eq: impl FnMut(usize, &T) -> bool,
1261
0
    ) -> [Option<&'_ mut T>; N] {
1262
0
        let ptrs = self.get_many_mut_pointers(hashes, eq);
1263
0
        ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut()))
1264
0
    }
1265
1266
0
    unsafe fn get_many_mut_pointers<const N: usize>(
1267
0
        &mut self,
1268
0
        hashes: [u64; N],
1269
0
        mut eq: impl FnMut(usize, &T) -> bool,
1270
0
    ) -> [Option<NonNull<T>>; N] {
1271
0
        array::from_fn(|i| {
1272
0
            self.find(hashes[i], |k| eq(i, k))
1273
0
                .map(|cur| cur.as_non_null())
1274
0
        })
1275
0
    }
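
A sketch of a disjoint multi-lookup built on `get_many_mut` (helper names are assumptions as before): with two distinct keys, the checked variant hands back two independent mutable borrows from a single `&mut` table.

fn swap_values<K: Hash + Eq, V, S: BuildHasher>(
    table: &mut RawTable<(K, V)>,
    state: &S,
    a: &K,
    b: &K,
) {
    let keys = [a, b];
    let hashes = [make_hash(state, a), make_hash(state, b)];
    // `eq(i, pair)` must compare `pair` against the i-th requested key.
    let [x, y] = table.get_many_mut(hashes, |i, pair| pair.0 == *keys[i]);
    if let (Some(x), Some(y)) = (x, y) {
        core::mem::swap(&mut x.1, &mut y.1);
    }
}
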
1276
1277
    /// Returns the number of elements the map can hold without reallocating.
1278
    ///
1279
    /// This number is a lower bound; the table might be able to hold
1280
    /// more, but is guaranteed to be able to hold at least this many.
1281
    #[inline]
1282
0
    pub fn capacity(&self) -> usize {
1283
0
        self.table.items + self.table.growth_left
1284
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::capacity
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::capacity
1285
1286
    /// Returns the number of elements in the table.
1287
    #[inline]
1288
0
    pub fn len(&self) -> usize {
1289
0
        self.table.items
1290
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize>>::len
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _>>::len
1291
1292
    /// Returns `true` if the table contains no elements.
1293
    #[inline]
1294
0
    pub fn is_empty(&self) -> bool {
1295
0
        self.len() == 0
1296
0
    }
1297
1298
    /// Returns the number of buckets in the table.
1299
    #[inline]
1300
0
    pub fn buckets(&self) -> usize {
1301
0
        self.table.bucket_mask + 1
1302
0
    }
1303
1304
    /// Checks whether the bucket at `index` is full.
1305
    ///
1306
    /// # Safety
1307
    ///
1308
    /// The caller must ensure `index` is less than the number of buckets.
1309
    #[inline]
1310
0
    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1311
0
        self.table.is_bucket_full(index)
1312
0
    }
1313
1314
    /// Returns an iterator over every element in the table. It is up to
1315
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1316
    /// Because we cannot make the `next` method unsafe on the `RawIter`
1317
    /// struct, we have to make the `iter` method unsafe.
1318
    #[inline]
1319
0
    pub unsafe fn iter(&self) -> RawIter<T> {
1320
0
        // SAFETY:
1321
0
        // 1. The caller must uphold the safety contract for `iter` method.
1322
0
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1323
0
        //    we will never expose RawTable::new_uninitialized in a public API.
1324
0
        self.table.iter()
1325
0
    }
1326
1327
    /// Returns an iterator over occupied buckets that could match a given hash.
1328
    ///
1329
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1330
    /// return items that have a hash value different than the one provided. You
1331
    /// should always validate the returned values before using them.
1332
    ///
1333
    /// It is up to the caller to ensure that the `RawTable` outlives the
1334
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1335
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1336
    #[cfg_attr(feature = "inline-more", inline)]
1337
0
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1338
0
        RawIterHash::new(self, hash)
1339
0
    }
1340
1341
    /// Returns an iterator which removes all elements from the table without
1342
    /// freeing the memory.
1343
    #[cfg_attr(feature = "inline-more", inline)]
1344
0
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1345
0
        unsafe {
1346
0
            let iter = self.iter();
1347
0
            self.drain_iter_from(iter)
1348
0
        }
1349
0
    }
1350
1351
    /// Returns an iterator which removes all elements from the table without
1352
    /// freeing the memory.
1353
    ///
1354
    /// Iteration starts at the provided iterator's current location.
1355
    ///
1356
    /// It is up to the caller to ensure that the iterator is valid for this
1357
    /// `RawTable` and covers all items that remain in the table.
1358
    #[cfg_attr(feature = "inline-more", inline)]
1359
0
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1360
0
        debug_assert_eq!(iter.len(), self.len());
1361
0
        RawDrain {
1362
0
            iter,
1363
0
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1364
0
            orig_table: NonNull::from(&mut self.table),
1365
0
            marker: PhantomData,
1366
0
        }
1367
0
    }
1368
1369
    /// Returns an iterator which consumes all elements from the table.
1370
    ///
1371
    /// Iteration starts at the provided iterator's current location.
1372
    ///
1373
    /// It is up to the caller to ensure that the iterator is valid for this
1374
    /// `RawTable` and covers all items that remain in the table.
1375
0
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1376
0
        debug_assert_eq!(iter.len(), self.len());
1377
1378
0
        let allocation = self.into_allocation();
1379
0
        RawIntoIter {
1380
0
            iter,
1381
0
            allocation,
1382
0
            marker: PhantomData,
1383
0
        }
1384
0
    }
1385
1386
    /// Converts the table into a raw allocation. The contents of the table
1387
    /// should be dropped using a `RawIter` before freeing the allocation.
1388
    #[cfg_attr(feature = "inline-more", inline)]
1389
0
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1390
0
        let alloc = if self.table.is_empty_singleton() {
1391
0
            None
1392
        } else {
1393
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1394
0
            let (layout, ctrl_offset) =
1395
0
                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1396
0
                    Some(lco) => lco,
1397
0
                    None => unsafe { hint::unreachable_unchecked() },
1398
                };
1399
0
            Some((
1400
0
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) },
1401
0
                layout,
1402
0
                unsafe { ptr::read(&self.alloc) },
1403
0
            ))
1404
        };
1405
0
        mem::forget(self);
1406
0
        alloc
1407
0
    }
1408
}
1409
1410
unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1411
where
1412
    T: Send,
1413
    A: Send,
1414
{
1415
}
1416
unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1417
where
1418
    T: Sync,
1419
    A: Sync,
1420
{
1421
}
1422
1423
impl RawTableInner {
1424
    const NEW: Self = RawTableInner::new();
1425
1426
    /// Creates a new empty hash table without allocating any memory.
1427
    ///
1428
    /// In effect this returns a table with exactly 1 bucket. However we can
1429
    /// leave the data pointer dangling since that bucket is never accessed
1430
    /// due to our load factor forcing us to always have at least 1 free bucket.
1431
    #[inline]
1432
0
    const fn new() -> Self {
1433
0
        Self {
1434
0
            // Be careful to cast the entire slice to a raw pointer.
1435
0
            ctrl: unsafe {
1436
0
                NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast())
1437
0
            },
1438
0
            bucket_mask: 0,
1439
0
            items: 0,
1440
0
            growth_left: 0,
1441
0
        }
1442
0
    }
1443
}
1444
1445
impl RawTableInner {
1446
    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1447
    /// The control bytes and buckets are left uninitialized.
1448
    ///
1449
    /// # Safety
1450
    ///
1451
    /// The caller of this function must ensure that `buckets` is a power of two,
1452
    /// and must also initialize all control bytes of length `self.bucket_mask + 1 +
1453
    /// Group::WIDTH` with the [`Tag::EMPTY`] byte.
1454
    ///
1455
    /// See also [`Allocator`] API for other safety concerns.
1456
    ///
1457
    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1458
    #[cfg_attr(feature = "inline-more", inline)]
1459
0
    unsafe fn new_uninitialized<A>(
1460
0
        alloc: &A,
1461
0
        table_layout: TableLayout,
1462
0
        buckets: usize,
1463
0
        fallibility: Fallibility,
1464
0
    ) -> Result<Self, TryReserveError>
1465
0
    where
1466
0
        A: Allocator,
1467
0
    {
1468
0
        debug_assert!(buckets.is_power_of_two());
1469
1470
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1471
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1472
0
            Some(lco) => lco,
1473
0
            None => return Err(fallibility.capacity_overflow()),
1474
        };
1475
1476
0
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1477
0
            Ok(block) => block.cast(),
1478
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1479
        };
1480
1481
        // SAFETY: a null pointer would have been caught by the allocation check above
1482
0
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1483
0
        Ok(Self {
1484
0
            ctrl,
1485
0
            bucket_mask: buckets - 1,
1486
0
            items: 0,
1487
0
            growth_left: bucket_mask_to_capacity(buckets - 1),
1488
0
        })
1489
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::new_uninitialized::<_>
1490
1491
    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1492
    /// capacity for inserting the given number of elements without reallocating.
1493
    ///
1494
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1495
    #[inline]
1496
0
    fn fallible_with_capacity<A>(
1497
0
        alloc: &A,
1498
0
        table_layout: TableLayout,
1499
0
        capacity: usize,
1500
0
        fallibility: Fallibility,
1501
0
    ) -> Result<Self, TryReserveError>
1502
0
    where
1503
0
        A: Allocator,
1504
0
    {
1505
0
        if capacity == 0 {
1506
0
            Ok(Self::NEW)
1507
        } else {
1508
            // SAFETY: We checked that we could successfully allocate the new table, and then
1509
            // initialized all control bytes with the constant `Tag::EMPTY` byte.
1510
            unsafe {
1511
0
                let buckets = capacity_to_buckets(capacity, table_layout)
1512
0
                    .ok_or_else(|| fallibility.capacity_overflow())?;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<hashbrown::raw::alloc::inner::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>::{closure#0}
1513
1514
0
                let mut result =
1515
0
                    Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1516
                // SAFETY: We checked that the table is allocated and therefore the table already has
1517
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1518
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1519
0
                result.ctrl_slice().fill_empty();
1520
0
1521
0
                Ok(result)
1522
            }
1523
        }
1524
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fallible_with_capacity::<_>
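
For intuition about the `capacity` to `buckets` mapping used here: aside from small-table special cases, the table keeps at most 7/8 of its buckets full, so a requested capacity is rounded up to a power of two large enough to satisfy that bound. A back-of-the-envelope sketch only; the real `capacity_to_buckets` also special-cases tiny tables and rejects layouts that would overflow `isize::MAX`:

// Need buckets * 7/8 >= capacity, with buckets a power of two.
fn approx_buckets_for(capacity: usize) -> usize {
    (capacity * 8).div_ceil(7).next_power_of_two()
}

// Usable slots before a grow is forced (the bucket_mask_to_capacity side).
fn approx_capacity_of(buckets: usize) -> usize {
    (buckets / 8) * 7
}
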
1525
1526
    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1527
    /// the given number of elements without reallocating.
1528
    ///
1529
    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
1530
    /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to
1531
    /// handle memory allocation failure.
1532
    ///
1533
    /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes.
1534
    ///
1535
    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1536
    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1537
0
    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1538
0
    where
1539
0
        A: Allocator,
1540
0
    {
1541
0
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1542
0
        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1543
0
            Ok(table_inner) => table_inner,
1544
            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1545
0
            Err(_) => unsafe { hint::unreachable_unchecked() },
1546
        }
1547
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::with_capacity::<_>
1548
1549
    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1550
    ///
1551
    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1552
    /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately
1553
    /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function. This is because
1554
    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1555
    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1556
    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1557
    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1558
    /// trailing control bytes (containing [`Tag::EMPTY`] bytes).
1559
    ///
1560
    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1561
    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1562
    /// `Safety`).
1563
    ///
1564
    /// # Warning
1565
    ///
1566
    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1567
    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1568
    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1569
    /// index will cause immediate [`undefined behavior`].
1570
    ///
1571
    /// # Safety
1572
    ///
1573
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1574
    /// Thus, in order to uphold those safety contracts, as well as for the correct operation
1575
    /// of this crate, the following rules are necessary and sufficient:
1576
    ///
1577
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1578
    ///   function results in [`undefined behavior`].
1579
    ///
1580
    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1581
    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
1582
    ///
1583
    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1584
    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1585
    ///
1586
    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1587
    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1588
    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1589
    ///
1590
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1591
    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1592
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1593
    #[inline]
1594
0
    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1595
0
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1596
0
        if unlikely(self.is_bucket_full(index)) {
1597
0
            debug_assert!(self.bucket_mask < Group::WIDTH);
1598
            // SAFETY:
1599
            //
1600
            // * Since the caller of this function ensures that the control bytes are properly
1601
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1602
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1603
            //   and points to the properly initialized control bytes (see also
1604
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1605
            //
1606
            // * Because the caller of this function ensures that the index was provided by the
1607
            //   `self.find_insert_slot_in_group()` function, for tables larger than the
1608
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1609
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1610
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1611
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1612
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1613
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1614
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1615
0
            index = Group::load_aligned(self.ctrl(0))
1616
0
                .match_empty_or_deleted()
1617
0
                .lowest_set_bit()
1618
0
                .unwrap_unchecked();
1619
0
        }
1620
0
        InsertSlot { index }
1621
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::fix_insert_slot
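
The small-table edge case handled above can be made concrete with plain integer arithmetic (a standalone illustration, not crate code): with 4 buckets and a 16-wide group, a match bit that lands in the trailing EMPTY bytes wraps onto a bucket that may already be full.

fn trailing_byte_wraparound() {
    let bucket_mask = 3usize; // 4 buckets, Group::WIDTH assumed to be 16
    let probe_pos = 0usize;
    let bit = 5usize; // hit reported in the trailing EMPTY control bytes
    // Masking wraps the out-of-range hit back into the table...
    let index = (probe_pos + bit) & bucket_mask;
    // ...onto bucket 1, which may be FULL. This is exactly the situation
    // fix_insert_slot detects and repairs by rescanning group 0.
    assert_eq!(index, 1);
}
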
1622
1623
    /// Finds the position to insert something in a group.
1624
    ///
1625
    /// **This may have false positives and must be fixed up with `fix_insert_slot`
1626
    /// before it's used.**
1627
    ///
1628
    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1629
    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1630
    #[inline]
1631
0
    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1632
0
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1633
0
1634
0
        if likely(bit.is_some()) {
1635
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1636
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1637
0
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1638
        } else {
1639
0
            None
1640
        }
1641
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot_in_group
1642
1643
    /// Searches for an element in the table, or a potential slot where that element could
1644
    /// be inserted (an empty or deleted [`Bucket`] index).
1645
    ///
1646
    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1647
    /// eliminated by LLVM optimizations.
1648
    ///
1649
    /// This function does not make any changes to the `data` part of the table, or any
1650
    /// changes to the `items` or `growth_left` field of the table.
1651
    ///
1652
    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1653
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1654
    /// will never return (will go into an infinite loop) for tables larger than the group
1655
    /// width, or return an index outside of the table indices range if the table is less
1656
    /// than the group width.
1657
    ///
1658
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1659
    /// function with only `FULL` buckets' indices and return the `index` of the found
1660
    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1661
    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1662
    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
1663
    /// if this function returns [`InsertSlot`], it will contain an index in the range
1664
    /// `0..=self.buckets()`.
1665
    ///
1666
    /// # Safety
1667
    ///
1668
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1669
    /// this function results in [`undefined behavior`].
1670
    ///
1671
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1672
    /// less than the group width and there was not at least one empty or deleted bucket in
1673
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1674
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1675
    /// control bytes outside the table range.
1676
    ///
1677
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1678
    #[inline]
1679
0
    unsafe fn find_or_find_insert_slot_inner(
1680
0
        &self,
1681
0
        hash: u64,
1682
0
        eq: &mut dyn FnMut(usize) -> bool,
1683
0
    ) -> Result<usize, InsertSlot> {
1684
0
        let mut insert_slot = None;
1685
0
1686
0
        let tag_hash = Tag::full(hash);
1687
0
        let mut probe_seq = self.probe_seq(hash);
1688
1689
        loop {
1690
            // SAFETY:
1691
            // * Caller of this function ensures that the control bytes are properly initialized.
1692
            //
1693
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1694
            //   of the table due to masking with `self.bucket_mask` and also because the number
1695
            //   of buckets is a power of two (see `self.probe_seq` function).
1696
            //
1697
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1698
            //   call `Group::load` due to the extended control bytes range, which is
1699
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1700
            //   byte will never be read for the allocated table);
1701
            //
1702
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1703
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1704
            //   bytes, which is safe (see RawTableInner::new).
1705
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1706
1707
0
            for bit in group.match_tag(tag_hash) {
1708
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1709
0
1710
0
                if likely(eq(index)) {
1711
0
                    return Ok(index);
1712
0
                }
1713
            }
1714
1715
            // We didn't find the element we were looking for in the group, try to get an
1716
            // insertion slot from the group if we don't have one yet.
1717
0
            if likely(insert_slot.is_none()) {
1718
0
                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1719
0
            }
1720
1721
0
            if let Some(insert_slot) = insert_slot {
1722
                // Only stop the search if the group contains at least one empty element.
1723
                // Otherwise, the element that we are looking for might be in a following group.
1724
0
                if likely(group.match_empty().any_bit_set()) {
1725
                    // We must have found an insert slot by now, since the current group contains at
1726
                    // least one. For tables smaller than the group width, there will still be an
1727
                    // empty element in the current (and only) group due to the load factor.
1728
                    unsafe {
1729
                        // SAFETY:
1730
                        // * Caller of this function ensures that the control bytes are properly initialized.
1731
                        //
1732
                        // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1733
0
                        return Err(self.fix_insert_slot(insert_slot));
1734
                    }
1735
0
                }
1736
0
            }
1737
1738
0
            probe_seq.move_next(self.bucket_mask);
1739
        }
1740
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_slot_inner
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_or_find_insert_slot_inner
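
One subtlety in the loop above deserves a concrete trace: a group containing only FULL and DELETED bytes cannot end the search, because the element may have been inserted past that group before the tombstones appeared. Illustrative values only:

//   group 0: [FULL, DELETED, FULL, ..., DELETED]   // no EMPTY byte
//   group 1: [FULL(K), EMPTY, ...]
//
// Probing for K records the first tombstone in group 0 as a candidate
// insert slot, but keeps scanning because group 0 has no EMPTY byte; it
// then finds K in group 1 and returns Ok(index) rather than Err(slot).
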
1741
1742
    /// Searches for an empty or deleted bucket which is suitable for inserting a new
1743
    /// element and sets the hash for that slot. Returns an index of that slot and the
1744
    /// old control byte stored in the found index.
1745
    ///
1746
    /// This function does not check if the given element exists in the table. Also,
1747
    /// this function does not check if there is enough space in the table to insert
1748
    /// a new element. The caller of the function must make sure that the table has at
1749
    /// least 1 empty or deleted `bucket`, otherwise this function will never return
1750
    /// (will go into an infinite loop) for tables larger than the group width, or
1751
    /// return an index outside of the table indices range if the table is less than
1752
    /// the group width.
1753
    ///
1754
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1755
    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
1756
    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
1757
    ///
1758
    /// This function does not make any changes to the `data` parts of the table,
1759
    /// or any changes to the `items` or `growth_left` field of the table.
1760
    ///
1761
    /// # Safety
1762
    ///
1763
    /// The safety rules are directly derived from the safety rules for the
1764
    /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_slot`] methods.
1765
    /// Thus, in order to uphold the safety contracts for those methods, as well as for
1766
    /// the correct operation of this crate, you must observe the following rules
1767
    /// when calling this function:
1768
    ///
1769
    /// * The [`RawTableInner`] has already been allocated and has properly initialized
1770
    ///   control bytes otherwise calling this function results in [`undefined behavior`].
1771
    ///
1772
    /// * The caller of this function must ensure that the "data" parts of the table
1773
    ///   will have an entry in the returned index (matching the given hash) right
1774
    ///   after calling this function.
1775
    ///
1776
    /// Attempting to write data at the `index` returned by this function when the table is
1777
    /// less than the group width and there was not at least one empty or deleted bucket in
1778
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1779
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1780
    /// control bytes outside the table range.
1781
    ///
1782
    /// The caller must independently increase the `items` field of the table, and also,
1783
    /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left`
1784
    /// field, and do not change it if the old control byte was [`Tag::DELETED`].
1785
    ///
1786
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
1787
    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
1788
    ///
1789
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1790
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1791
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1792
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
1793
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1794
    #[inline]
1795
0
    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, Tag) {
1796
0
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
1797
0
        let index: usize = self.find_insert_slot(hash).index;
1798
0
        // SAFETY:
1799
0
        // 1. The `find_insert_slot` function either returns an `index` less than or
1800
0
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
1801
0
        //    returns if it cannot find an empty or deleted slot.
1802
0
        // 2. The caller of this function guarantees that the table has already been
1803
0
        //    allocated
1804
0
        let old_ctrl = *self.ctrl(index);
1805
0
        self.set_ctrl_hash(index, hash);
1806
0
        (index, old_ctrl)
1807
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_insert_slot
1808
1809
    /// Searches for an empty or deleted bucket which is suitable for inserting
1810
    /// a new element, returning the `index` for the new [`Bucket`].
1811
    ///
1812
    /// This function does not make any changes to the `data` part of the table, or any
1813
    /// changes to the `items` or `growth_left` field of the table.
1814
    ///
1815
    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
1816
    /// will never return (will go into an infinite loop) for tables larger than the group
1817
    /// width, or return an index outside of the table indices range if the table is less
1818
    /// than the group width.
1819
    ///
1820
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
1821
    /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
1822
    /// but in any case, if this function returns [`InsertSlot`], it will contain an index
1823
    /// in the range `0..=self.buckets()`.
1824
    ///
1825
    /// # Safety
1826
    ///
1827
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1828
    /// this function results in [`undefined behavior`].
1829
    ///
1830
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1831
    /// less than the group width and there was not at least one empty or deleted bucket in
1832
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1833
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`]
1834
    /// control bytes outside the table range.
1835
    ///
1836
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1837
    #[inline]
1838
0
    unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
1839
0
        let mut probe_seq = self.probe_seq(hash);
1840
        loop {
1841
            // SAFETY:
1842
            // * Caller of this function ensures that the control bytes are properly initialized.
1843
            //
1844
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1845
            //   of the table due to masking with `self.bucket_mask` and also because the number
1846
            //   of buckets is a power of two (see `self.probe_seq` function).
1847
            //
1848
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1849
            //   call `Group::load` due to the extended control bytes range, which is
1850
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1851
            //   byte will never be read for the allocated table);
1852
            //
1853
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1854
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1855
            //   bytes, which is safe (see RawTableInner::new).
1856
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1857
0
1858
0
            let index = self.find_insert_slot_in_group(&group, &probe_seq);
1859
0
            if likely(index.is_some()) {
1860
                // SAFETY:
1861
                // * Caller of this function ensures that the control bytes are properly initialized.
1862
                //
1863
                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
1864
                unsafe {
1865
0
                    return self.fix_insert_slot(index.unwrap_unchecked());
1866
                }
1867
0
            }
1868
0
            probe_seq.move_next(self.bucket_mask);
1869
        }
1870
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::find_insert_slot
1871
1872
    /// Searches for an element in a table, returning the `index` of the found element.
1873
    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
1874
    /// eliminated by LLVM optimizations.
1875
    ///
1876
    /// This function does not make any changes to the `data` part of the table, or any
1877
    /// changes to the `items` or `growth_left` field of the table.
1878
    ///
1879
    /// The table must have at least 1 empty `bucket`, otherwise, if the
1880
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
1881
    /// this function will also never return (will go into an infinite loop).
1882
    ///
1883
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1884
    /// function with only `FULL` buckets' indices and return the `index` of the found
1885
    /// element as `Some(index)`, so the index will always be in the range
1886
    /// `0..self.buckets()`.
1887
    ///
1888
    /// # Safety
1889
    ///
1890
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1891
    /// this function results in [`undefined behavior`].
1892
    ///
1893
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1894
    #[inline(always)]
1895
0
    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
1896
0
        let tag_hash = Tag::full(hash);
1897
0
        let mut probe_seq = self.probe_seq(hash);
1898
1899
        loop {
1900
            // SAFETY:
1901
            // * Caller of this function ensures that the control bytes are properly initialized.
1902
            //
1903
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1904
            //   of the table due to masking with `self.bucket_mask`.
1905
            //
1906
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1907
            //   call `Group::load` due to the extended control bytes range, which is
1908
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1909
            //   byte will never be read for the allocated table);
1910
            //
1911
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1912
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1913
            //   bytes, which is safe (see RawTableInner::new_in).
1914
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1915
1916
0
            for bit in group.match_tag(tag_hash) {
1917
                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1918
                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1919
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1920
0
1921
0
                if likely(eq(index)) {
1922
0
                    return Some(index);
1923
0
                }
1924
            }
1925
1926
0
            if likely(group.match_empty().any_bit_set()) {
1927
0
                return None;
1928
0
            }
1929
0
1930
0
            probe_seq.move_next(self.bucket_mask);
1931
        }
1932
0
    }
1933
1934
    /// Prepares for rehashing data in place (that is, without allocating new memory).
1935
    /// Converts all `FULL` control bytes to `Tag::DELETED` and all `Tag::DELETED` control
1936
    /// bytes to `Tag::EMPTY`, i.e. performs the following conversion:
1937
    ///
1938
    /// - `Tag::EMPTY` control bytes   -> `Tag::EMPTY`;
1939
    /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`;
1940
    /// - `FULL` control bytes    -> `Tag::DELETED`.
1941
    ///
1942
    /// This function does not make any changes to the `data` parts of the table,
1943
    /// or any changes to the `items` or `growth_left` field of the table.
1944
    ///
1945
    /// # Safety
1946
    ///
1947
    /// You must observe the following safety rules when calling this function:
1948
    ///
1949
    /// * The [`RawTableInner`] has already been allocated;
1950
    ///
1951
    /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL`
1952
    ///   bytes when re-inserting them into their ideal position (which was impossible
1953
    ///   to do during the first insert due to tombstones). If the caller does not do
1954
    ///   this, then calling this function may result in a memory leak.
1955
    ///
1956
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
1957
    ///   calling this function results in [`undefined behavior`].
1958
    ///
1959
    /// Calling this function on a table that has not been allocated results in
1960
    /// [`undefined behavior`].
1961
    ///
1962
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
1963
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
1964
    ///
1965
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
1966
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1967
    #[allow(clippy::mut_mut)]
1968
    #[inline]
1969
0
    unsafe fn prepare_rehash_in_place(&mut self) {
1970
        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
1971
        // This effectively frees up all buckets containing a DELETED entry.
1972
        //
1973
        // SAFETY:
1974
        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
1975
        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
1976
        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
1977
        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
1978
        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
1979
        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
1980
0
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
1981
0
            let group = Group::load_aligned(self.ctrl(i));
1982
0
            let group = group.convert_special_to_empty_and_full_to_deleted();
1983
0
            group.store_aligned(self.ctrl(i));
1984
0
        }
1985
1986
        // Fix up the trailing control bytes. See the comments in set_ctrl
1987
        // for the handling of tables smaller than the group width.
1988
        //
1989
        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
1990
        // has already been allocated
1991
0
        if unlikely(self.buckets() < Group::WIDTH) {
1992
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
1993
0
            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
1994
0
            // `Group::WIDTH` is safe
1995
0
            self.ctrl(0)
1996
0
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
1997
0
        } else {
1998
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
1999
0
            // control bytes,so copying `Group::WIDTH` bytes with offset equal
2000
0
            // to `self.buckets() == self.bucket_mask + 1` is safe
2001
0
            self.ctrl(0)
2002
0
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2003
0
        }
2004
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_rehash_in_place
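
The three-way conversion is a pure function of each control byte. A scalar model over plain bytes (the real code does this group-at-a-time with SIMD; the byte values follow the SwissTable-style convention that FULL bytes have the high bit clear):

const EMPTY_BYTE: u8 = 0xFF;   // special: all bits set
const DELETED_BYTE: u8 = 0x80; // special: high bit set, rest clear

// Scalar sketch of convert_special_to_empty_and_full_to_deleted().
fn rehash_ctrl(byte: u8) -> u8 {
    if byte & 0x80 != 0 {
        EMPTY_BYTE   // EMPTY stays EMPTY; DELETED tombstones are freed
    } else {
        DELETED_BYTE // FULL is marked for re-insertion at its ideal slot
    }
}
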
2005
2006
    /// Returns an iterator over every element in the table.
2007
    ///
2008
    /// # Safety
2009
    ///
2010
    /// If any of the following conditions are violated, the result
2011
    /// is [`undefined behavior`]:
2012
    ///
2013
    /// * The caller has to ensure that the `RawTableInner` outlives the
2014
    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2015
    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2016
    ///
2017
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2018
    ///
2019
    /// The type `T` must be the actual type of the elements stored in the table,
2020
    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2021
    ///
2022
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2023
    #[inline]
2024
0
    unsafe fn iter<T>(&self) -> RawIter<T> {
2025
0
        // SAFETY:
2026
0
        // 1. Since the caller of this function ensures that the control bytes
2027
0
        //    are properly initialized and `self.data_end()` points to the start
2028
0
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2029
0
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2030
0
        //    control bytes.
2031
0
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2032
0
        //    equal to zero).
2033
0
        // 3. We pass the exact value of buckets of the table to the function.
2034
0
        //
2035
0
        //                         `ctrl` points here (to the start
2036
0
        //                         of the first control byte `CT0`)
2037
0
        //                          ∨
2038
0
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2039
0
        //                           \________  ________/
2040
0
        //                                    \/
2041
0
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2042
0
        //
2043
0
        // where: T0...T_n  - our stored data;
2044
0
        //        CT0...CT_n - control bytes or metadata for `data`.
2045
0
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2046
0
        //                        with loading `Group` bytes from the heap works properly, even if the result
2047
0
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2048
0
        //                        `RawTableInner::set_ctrl` function.
2049
0
        //
2050
0
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2051
0
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2052
0
        let data = Bucket::from_base_index(self.data_end(), 0);
2053
0
        RawIter {
2054
0
            // SAFETY: See explanation above
2055
0
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2056
0
            items: self.items,
2057
0
        }
2058
0
    }
2059
2060
    /// Executes the destructors (if any) of the values stored in the table.
2061
    ///
2062
    /// # Note
2063
    ///
2064
    /// This function does not erase the control bytes of the table and does
2065
    /// not make any changes to the `items` or `growth_left` fields of the
2066
    /// table. If necessary, the caller of this function must manually set
2067
    /// up these table fields, for example using the [`clear_no_drop`] function.
2068
    ///
2069
    /// Be careful when calling this function: the drop function of
2070
    /// the elements can panic, which can leave the table in an inconsistent
2071
    /// state.
2072
    ///
2073
    /// # Safety
2074
    ///
2075
    /// The type `T` must be the actual type of the elements stored in the table,
2076
    /// otherwise calling this function may result in [`undefined behavior`].
2077
    ///
2078
    /// If `T` is a type that should be dropped and **the table is not empty**,
2079
    /// calling this function more than once results in [`undefined behavior`].
2080
    ///
2081
    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2082
    /// calling this function may result in [`undefined behavior`].
2083
    ///
2084
    /// It is safe to call this function on a table that has not been allocated,
2085
    /// on a table with uninitialized control bytes, and on a table with no actual
2086
    /// data but with `Full` control bytes if `self.items == 0`.
2087
    ///
2088
    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
2089
    /// about properly removing or saving an `element` from / into the [`RawTable`] /
2090
    /// [`RawTableInner`].
2091
    ///
2092
    /// [`Bucket::drop`]: Bucket::drop
2093
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2094
    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2095
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2096
0
    unsafe fn drop_elements<T>(&mut self) {
2097
0
        // Check that `self.items != 0`. Protects against the possibility
2098
0
        // of creating an iterator on a table with uninitialized control bytes.
2099
0
        if T::NEEDS_DROP && self.items != 0 {
2100
            // SAFETY: We know for sure that RawTableInner will outlive the
2101
            // returned `RawIter` iterator, and the caller of this function
2102
            // must uphold the safety contract for `drop_elements` method.
2103
0
            for item in self.iter::<T>() {
2104
0
                // SAFETY: The caller must uphold the safety contract for
2105
0
                // `drop_elements` method.
2106
0
                item.drop();
2107
0
            }
2108
0
        }
2109
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_elements::<_>
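A simplified, safe analog of the `T::NEEDS_DROP && self.items != 0` gate may clarify why the iterator is never built for trivially droppable types. The Vec below merely stands in for the set of full buckets; nothing here is hashbrown API.

```rust
use std::mem;

fn drop_elements<T>(slots: &mut Vec<T>) {
    // Skip the walk entirely when `T` has no drop glue or nothing is
    // stored, just as the method above avoids creating a `RawIter`.
    if mem::needs_drop::<T>() && !slots.is_empty() {
        for item in slots.drain(..) {
            drop(item); // stands in for `item.drop()` on each full bucket
        }
    }
}

fn main() {
    let mut strings = vec![String::from("a"), String::from("b")];
    drop_elements(&mut strings); // destructors actually run here

    let mut ints = vec![1usize, 2, 3];
    drop_elements(&mut ints); // no-op: `usize` needs no drop glue
    assert_eq!(ints.len(), 3);
}
```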
2110
2111
    /// Executes the destructors (if any) of the values stored in the table and then
2112
    /// deallocates the table.
2113
    ///
2114
    /// # Note
2115
    ///
2116
    /// Calling this function automatically makes invalid (dangling) all instances of
2117
    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2118
    ///
2119
    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2120
    /// fields of the table. If necessary, the caller of this function must manually set
2121
    /// up these table fields.
2122
    ///
2123
    /// # Safety
2124
    ///
2125
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2126
    ///
2127
    /// * Calling this function more than once;
2128
    ///
2129
    /// * The type `T` must be the actual type of the elements stored in the table.
2130
    ///
2131
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2132
    ///   to allocate this table.
2133
    ///
2134
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2135
    ///   was used to allocate this table.
2136
    ///
2137
    /// The caller of this function should pay attention to the possibility of the
2138
    /// elements' drop function panicking, because this:
2139
    ///
2140
    ///    * May leave the table in an inconsistent state;
2141
    ///
2142
    ///    * May leak memory, since the allocation is never deallocated.
2143
    ///
2144
    /// Attempt to use the `ctrl` field of the table (dereference) after calling this
2145
    /// function results in [`undefined behavior`].
2146
    ///
2147
    /// It is safe to call this function on a table that has not been allocated,
2148
    /// on a table with uninitialized control bytes, and on a table with no actual
2149
    /// data but with `Full` control bytes if `self.items == 0`.
2150
    ///
2151
    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2152
    /// for more information.
2153
    ///
2154
    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2155
    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2156
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2157
0
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2158
0
        if !self.is_empty_singleton() {
2159
0
            unsafe {
2160
0
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2161
0
                self.drop_elements::<T>();
2162
0
                // SAFETY:
2163
0
                // 1. We have checked that our table is allocated.
2164
0
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2165
0
                self.free_buckets(alloc, table_layout);
2166
0
            }
2167
0
        }
2168
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<usize, hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::drop_inner_table::<_, _>
2169
2170
    /// Returns a pointer to an element in the table (convenience for
2171
    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2172
    ///
2173
    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2174
    /// otherwise using it may result in [`undefined behavior`].
2175
    ///
2176
    /// # Safety
2177
    ///
2178
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2179
    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2180
    /// this function, the following safety rules must be observed:
2181
    ///
2182
    /// * The table must already be allocated;
2183
    ///
2184
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2185
    ///   function, i.e. `(index + 1) <= self.buckets()`.
2186
    ///
2187
    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2188
    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2189
    ///
2190
    /// It is safe to call this function with index of zero (`index == 0`) on a table that has
2191
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2192
    ///
2193
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2194
    /// be less than the number returned by the [`RawTable::buckets`] function, i.e.
2195
    /// `(index + 1) <= self.buckets()`.
2196
    ///
2197
    /// ```none
2198
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2199
    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2200
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2201
    ///
2202
    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2203
    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2204
    ///                  |
2205
    ///                  |               `base = table.data_end::<T>()` points here
2206
    ///                  |               (to the start of CT0 or to the end of T0)
2207
    ///                  v                 v
2208
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2209
    ///                     ^                                              \__________  __________/
2210
    ///        `table.bucket(3)` returns a pointer that points                        \/
2211
    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2212
    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2213
    ///
2214
    /// where: T0...T_n  - our stored data;
2215
    ///        CT0...CT_n - control bytes or metadata for `data`;
2216
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2217
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2218
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2219
    ///
2220
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2221
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2222
    /// ```
2223
    ///
2224
    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2225
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2226
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2227
    #[inline]
2228
0
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2229
0
        debug_assert_ne!(self.bucket_mask, 0);
2230
0
        debug_assert!(index < self.buckets());
2231
0
        Bucket::from_base_index(self.data_end(), index)
2232
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket::<_>
2233
2234
    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2235
    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2236
    ///
2237
    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2238
    /// otherwise using it may result in [`undefined behavior`].
2239
    ///
2240
    /// # Safety
2241
    ///
2242
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2243
    ///
2244
    /// * The table must already be allocated;
2245
    ///
2246
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2247
    ///   function, i.e. `(index + 1) <= self.buckets()`;
2248
    ///
2249
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2250
    ///
2251
    /// ```none
2252
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2253
    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2254
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2255
    ///
2256
    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2257
    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2258
    ///                  |
2259
    ///                  |               `base = table.data_end::<u8>()` points here
2260
    ///                  |               (to the start of CT0 or to the end of T0)
2261
    ///                  v                 v
2262
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2263
    ///                                                                    \__________  __________/
2264
    ///                                                                               \/
2265
    ///                                                                    additional control bytes
2266
    ///                                                                     `m = Group::WIDTH - 1`
2267
    ///
2268
    /// where: T0...T_n  - our stored data;
2269
    ///        CT0...CT_n - control bytes or metadata for `data`;
2270
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2271
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2272
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2273
    ///
2274
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2275
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2276
    /// ```
2277
    ///
2278
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2279
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2280
    #[inline]
2281
0
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2282
0
        debug_assert_ne!(self.bucket_mask, 0);
2283
0
        debug_assert!(index < self.buckets());
2284
0
        let base: *mut u8 = self.data_end().as_ptr();
2285
0
        base.sub((index + 1) * size_of)
2286
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::bucket_ptr
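The reversed data layout makes the `base.sub((index + 1) * size_of)` formula easy to check with a plain array standing in for the allocation. `data_end` below plays the role of `self.data_end::<u8>()`; everything else is a toy.

```rust
fn main() {
    let size_of = std::mem::size_of::<u32>();
    let data: [u32; 4] = [33, 22, 11, 0]; // memory order: T3, T2, T1, T0
    unsafe {
        // One past the end of T0, where the control bytes would begin.
        let data_end: *const u8 = data.as_ptr().add(4).cast();
        for index in 0..4usize {
            // Same arithmetic as `bucket_ptr`: step backwards from the end.
            let p = data_end.sub((index + 1) * size_of).cast::<u32>();
            assert_eq!(*p, index as u32 * 11); // T0 = 0, T1 = 11, T2 = 22, ...
        }
    }
}
```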
2287
2288
    /// Returns a pointer to one past the last `data` element in the table as viewed from
2289
    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2290
    ///
2291
    /// This function actually returns a pointer to the end of the `data element` at
2292
    /// index "0" (zero).
2293
    ///
2294
    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2295
    /// otherwise using it may result in [`undefined behavior`].
2296
    ///
2297
    /// # Note
2298
    ///
2299
    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2300
    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2301
    ///
2302
    /// ```none
2303
    ///                        `table.data_end::<T>()` returns pointer that points here
2304
    ///                        (to the end of `T0`)
2305
    ///                          ∨
2306
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2307
    ///                           \________  ________/
2308
    ///                                    \/
2309
    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2310
    ///
2311
    /// where: T0...T_n  - our stored data;
2312
    ///        CT0...CT_n - control bytes or metadata for `data`.
2313
    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2314
    ///                        with loading `Group` bytes from the heap works properly, even if the result
2315
    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2316
    ///                        `RawTableInner::set_ctrl` function.
2317
    ///
2318
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2319
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2320
    /// ```
2321
    ///
2322
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2323
    #[inline]
2324
0
    fn data_end<T>(&self) -> NonNull<T> {
2325
0
        self.ctrl.cast()
2326
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<u8>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<usize>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::data_end::<_>
2327
2328
    /// Returns an iterator-like object for a probe sequence on the table.
2329
    ///
2330
    /// This iterator never terminates, but is guaranteed to visit each bucket
2331
    /// group exactly once. The loop using `probe_seq` must terminate upon
2332
    /// reaching a group containing an empty bucket.
2333
    #[inline]
2334
0
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2335
0
        ProbeSeq {
2336
0
            // This is the same as `hash as usize % self.buckets()` because the number
2337
0
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2338
0
            pos: h1(hash) & self.bucket_mask,
2339
0
            stride: 0,
2340
0
        }
2341
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::probe_seq
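A toy run of the triangular probe, assuming a group width of 4 (the real `Group::WIDTH` depends on the platform), shows the guarantee mentioned above: over `buckets / width` steps every group is visited exactly once.

```rust
const GROUP_WIDTH: usize = 4; // assumed width for the demo

fn main() {
    let buckets = 16usize; // a power of two, as in the real table
    let bucket_mask = buckets - 1;
    let hash: u64 = 0x9E37_79B9_7F4A_7C15;

    // Same starting point as `probe_seq`: h1(hash) & bucket_mask.
    let mut pos = (hash as usize) & bucket_mask;
    let mut stride = 0;

    let mut groups_seen = Vec::new();
    for _ in 0..(buckets / GROUP_WIDTH) {
        groups_seen.push(pos / GROUP_WIDTH);
        // Same update as `ProbeSeq::move_next`: the stride grows by one
        // group width per step (triangular numbers), wrapping via the mask.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
    groups_seen.sort_unstable();
    assert_eq!(groups_seen, vec![0, 1, 2, 3]); // each group visited once
}
```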
2342
2343
    #[inline]
2344
0
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, hash: u64) {
2345
0
        self.growth_left -= usize::from(old_ctrl.special_is_empty());
2346
0
        self.set_ctrl_hash(index, hash);
2347
0
        self.items += 1;
2348
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::record_item_insert_at
2349
2350
    #[inline]
2351
0
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2352
0
        let probe_seq_pos = self.probe_seq(hash).pos;
2353
0
        let probe_index =
2354
0
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group::{closure#0}
2355
0
        probe_index(i) == probe_index(new_i)
2356
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_in_same_group
2357
2358
    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2359
    /// the end of the array.
2360
    ///
2361
    /// This function does not make any changes to the `data` parts of the table,
2362
    /// or any changes to the `items` or `growth_left` field of the table.
2363
    ///
2364
    /// # Safety
2365
    ///
2366
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2367
    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2368
    /// following rules when calling this function:
2369
    ///
2370
    /// * The [`RawTableInner`] has already been allocated;
2371
    ///
2372
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2373
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2374
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2375
    ///
2376
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2377
    ///
2378
    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2379
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2380
    ///
2381
    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2382
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2383
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2384
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2385
    #[inline]
2386
0
    unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) {
2387
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`]
2388
0
        self.set_ctrl(index, Tag::full(hash));
2389
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl_hash
2390
2391
    /// Replaces the hash in the control byte at the given index with the provided one,
2392
    /// and possibly also replicates the new control byte at the end of the array of control
2393
    /// bytes, returning the old control byte.
2394
    ///
2395
    /// This function does not make any changes to the `data` parts of the table,
2396
    /// or any changes to the `items` or `growth_left` field of the table.
2397
    ///
2398
    /// # Safety
2399
    ///
2400
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`]
2401
    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2402
    /// methods, you must observe the following rules when calling this function:
2403
    ///
2404
    /// * The [`RawTableInner`] has already been allocated;
2405
    ///
2406
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2407
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2408
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2409
    ///
2410
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2411
    ///
2412
    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2413
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2414
    ///
2415
    /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash
2416
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2417
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2418
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2419
    #[inline]
2420
0
    unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag {
2421
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`]
2422
0
        let prev_ctrl = *self.ctrl(index);
2423
0
        self.set_ctrl_hash(index, hash);
2424
0
        prev_ctrl
2425
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::replace_ctrl_hash
2426
2427
    /// Sets a control byte, and possibly also the replicated control byte at
2428
    /// the end of the array.
2429
    ///
2430
    /// This function does not make any changes to the `data` parts of the table,
2431
    /// or any changes to the `items` or `growth_left` field of the table.
2432
    ///
2433
    /// # Safety
2434
    ///
2435
    /// You must observe the following safety rules when calling this function:
2436
    ///
2437
    /// * The [`RawTableInner`] has already been allocated;
2438
    ///
2439
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2440
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2441
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2442
    ///
2443
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2444
    ///
2445
    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
2446
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2447
    ///
2448
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2449
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2450
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2451
    #[inline]
2452
0
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) {
2453
0
        // Replicate the first Group::WIDTH control bytes at the end of
2454
0
        // the array without using a branch. If the table is smaller than
2455
0
        // the group width (self.buckets() < Group::WIDTH),
2456
0
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2457
0
        //
2458
0
        // - If index >= Group::WIDTH then index == index2.
2459
0
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2460
0
        //
2461
0
        // The very last replicated control byte is never actually read because
2462
0
        // we mask the initial index for unaligned loads, but we write it
2463
0
        // anyway because it makes the set_ctrl implementation simpler.
2464
0
        //
2465
0
        // If there are fewer buckets than Group::WIDTH then this code will
2466
0
        // replicate the buckets at the end of the trailing group. For example
2467
0
        // with 2 buckets and a group size of 4, the control bytes will look
2468
0
        // like this:
2469
0
        //
2470
0
        //     Real    |             Replicated
2471
0
        // ---------------------------------------------
2472
0
        // | [A] | [B] | [Tag::EMPTY] | [Tag::EMPTY] | [A] | [B] |
2473
0
        // ---------------------------------------------
2474
0
2475
0
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2476
0
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2477
0
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2478
0
2479
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2480
0
        *self.ctrl(index) = ctrl;
2481
0
        *self.ctrl(index2) = ctrl;
2482
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::set_ctrl
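The branch-free `index2` computation is easy to sanity-check in isolation. The sketch below assumes a group width of 4 and pulls the expression out into a hypothetical free function.

```rust
const GROUP_WIDTH: usize = 4; // assumed width for the demo

fn index2(index: usize, bucket_mask: usize) -> usize {
    ((index.wrapping_sub(GROUP_WIDTH)) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // 2 buckets (mask 1): every real byte is mirrored into the trailing
    // group, matching the | [A] | [B] | ... | [A] | [B] | diagram above.
    assert_eq!(index2(0, 1), 4);
    assert_eq!(index2(1, 1), 5);

    // 8 buckets (mask 7): only indices below GROUP_WIDTH are mirrored past
    // the end (index2 == bucket_mask + 1 + index); the rest map to themselves.
    assert_eq!(index2(0, 7), 8);
    assert_eq!(index2(3, 7), 11);
    assert_eq!(index2(5, 7), 5);
}
```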
2483
2484
    /// Returns a pointer to a control byte.
2485
    ///
2486
    /// # Safety
2487
    ///
2488
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2489
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2490
    /// Note that calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2491
    /// returns a pointer to the end of the allocated table, which is useless on its own.
2492
    ///
2493
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2494
    /// table that has not been allocated results in [`Undefined Behavior`].
2495
    ///
2496
    /// So to satisfy both requirements you should always follow the rule that
2497
    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2498
    ///
2499
    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
2500
    /// for read-only purposes.
2501
    ///
2502
    /// See also the [`Bucket::as_ptr()`] method, for more information about properly removing
2503
    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
2504
    ///
2505
    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2506
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2507
    #[inline]
2508
0
    unsafe fn ctrl(&self, index: usize) -> *mut Tag {
2509
0
        debug_assert!(index < self.num_ctrl_bytes());
2510
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2511
0
        self.ctrl.as_ptr().add(index).cast()
2512
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::ctrl
2513
2514
    /// Gets the slice of all control bytes.
2515
0
    fn ctrl_slice(&mut self) -> &mut [Tag] {
2516
0
        // SAFETY: We've initialized all control bytes, and have the correct number.
2517
0
        unsafe { slice::from_raw_parts_mut(self.ctrl.as_ptr().cast(), self.num_ctrl_bytes()) }
2518
0
    }
2519
2520
    #[inline]
2521
0
    fn buckets(&self) -> usize {
2522
0
        self.bucket_mask + 1
2523
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::buckets
2524
2525
    /// Checks whether the bucket at `index` is full.
2526
    ///
2527
    /// # Safety
2528
    ///
2529
    /// The caller must ensure `index` is less than the number of buckets.
2530
    #[inline]
2531
0
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2532
0
        debug_assert!(index < self.buckets());
2533
0
        (*self.ctrl(index)).is_full()
2534
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_bucket_full
2535
2536
    #[inline]
2537
0
    fn num_ctrl_bytes(&self) -> usize {
2538
0
        self.bucket_mask + 1 + Group::WIDTH
2539
0
    }
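The size relations used throughout this file can be checked with a couple of assertions, again assuming a group width of 4.

```rust
const GROUP_WIDTH: usize = 4; // assumed width for the demo

fn main() {
    let bucket_mask = 7usize; // an 8-bucket table
    let buckets = bucket_mask + 1;

    // num_ctrl_bytes: the real bytes plus one trailing replicated group.
    assert_eq!(bucket_mask + 1 + GROUP_WIDTH, 12);

    // Masking is the power-of-two shortcut for `hash % buckets` used by h1.
    let hash: u64 = 0xDEAD_BEEF;
    assert_eq!((hash as usize) & bucket_mask, (hash as usize) % buckets);
}
```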
2540
2541
    #[inline]
2542
0
    fn is_empty_singleton(&self) -> bool {
2543
0
        self.bucket_mask == 0
2544
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::is_empty_singleton
2545
2546
    /// Attempts to allocate a new hash table with at least enough capacity
2547
    /// for inserting the given number of elements without reallocating,
2548
    /// and returns it inside a `ScopeGuard` to protect against a panic in the hash
2549
    /// function.
2550
    ///
2551
    /// # Note
2552
    ///
2553
    /// It is recommended (but not required):
2554
    ///
2555
    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2556
    ///
2557
    /// * The `alloc` is the same [`Allocator`] as the `Allocator` used
2558
    ///   to allocate this table.
2559
    ///
2560
    /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used
2561
    ///   to allocate this table.
2562
    ///
2563
    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2564
    /// this table, then using `mem::swap` with the `self` and the new table returned
2565
    /// by this function results in [`undefined behavior`].
2566
    ///
2567
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2568
    #[allow(clippy::mut_mut)]
2569
    #[inline]
2570
0
    fn prepare_resize<'a, A>(
2571
0
        &self,
2572
0
        alloc: &'a A,
2573
0
        table_layout: TableLayout,
2574
0
        capacity: usize,
2575
0
        fallibility: Fallibility,
2576
0
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2577
0
    where
2578
0
        A: Allocator,
2579
0
    {
2580
0
        debug_assert!(self.items <= capacity);
2581
2582
        // Allocate and initialize the new table.
2583
0
        let new_table =
2584
0
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2585
2586
        // The hash function may panic, in which case we simply free the new
2587
        // table without dropping any elements that may have been copied into
2588
        // it.
2589
        //
2590
        // This guard is also used to free the old table on success, see
2591
        // the comment at the bottom of this function.
2592
0
        Ok(guard(new_table, move |self_| {
2593
0
            if !self_.is_empty_singleton() {
2594
0
                // SAFETY:
2595
0
                // 1. We have checked that our table is allocated.
2596
0
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2597
0
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2598
0
                unsafe { self_.free_buckets(alloc, table_layout) };
2599
0
            }
2600
0
        }))
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<hashbrown::raw::alloc::inner::Global>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>::{closure#0}
2601
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::prepare_resize::<_>
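The guard pattern relied on here can be sketched with a hand-rolled stand-in for `scopeguard::guard`: the destructor frees the new allocation even if the hash function panics. On success the real code dismisses the guard instead of letting it run; the types below are illustrative only.

```rust
struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    on_drop: F,
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        (self.on_drop)(&mut self.value);
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _new_table = Guard {
            value: vec![0u8; 64], // stands in for the fresh allocation
            on_drop: |buf: &mut Vec<u8>| {
                // In `prepare_resize` this callback is `free_buckets`.
                buf.clear();
            },
        };
        panic!("hash function panicked"); // the guard still runs on unwind
    });
    assert!(result.is_err()); // the panic propagated, but nothing leaked
}
```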
2602
2603
    /// Reserves or rehashes to make room for `additional` more elements.
2604
    ///
2605
    /// This uses dynamic dispatch to reduce the amount of
2606
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2607
    ///
2608
    /// # Safety
2609
    ///
2610
    /// If any of the following conditions are violated, the result is
2611
    /// [`undefined behavior`]:
2612
    ///
2613
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2614
    ///   to allocate this table.
2615
    ///
2616
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2617
    ///   used to allocate this table.
2618
    ///
2619
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2620
    ///   the elements stored in the table.
2621
    ///
2622
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2623
    ///
2624
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2625
    #[allow(clippy::inline_always)]
2626
    #[inline(always)]
2627
0
    unsafe fn reserve_rehash_inner<A>(
2628
0
        &mut self,
2629
0
        alloc: &A,
2630
0
        additional: usize,
2631
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2632
0
        fallibility: Fallibility,
2633
0
        layout: TableLayout,
2634
0
        drop: Option<unsafe fn(*mut u8)>,
2635
0
    ) -> Result<(), TryReserveError>
2636
0
    where
2637
0
        A: Allocator,
2638
0
    {
2639
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2640
0
        let new_items = match self.items.checked_add(additional) {
2641
0
            Some(new_items) => new_items,
2642
0
            None => return Err(fallibility.capacity_overflow()),
2643
        };
2644
0
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2645
0
        if new_items <= full_capacity / 2 {
2646
            // Rehash in-place without re-allocating if we have plenty of spare
2647
            // capacity that is locked up due to DELETED entries.
2648
2649
            // SAFETY:
2650
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2651
            //    (since new_items <= full_capacity / 2);
2652
            // 2. The caller ensures that `drop` function is the actual drop function of
2653
            //    the elements stored in the table.
2654
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2655
            //    used to allocate this table.
2656
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2657
            //    are already initialized.
2658
0
            self.rehash_in_place(hasher, layout.size, drop);
2659
0
            Ok(())
2660
        } else {
2661
            // Otherwise, conservatively resize to at least the next size up
2662
            // to avoid churning deletes into frequent rehashes.
2663
            //
2664
            // SAFETY:
2665
            // 1. We know for sure that `capacity >= self.items`.
2666
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2667
            //    [`TableLayout`] that were used to allocate this table.
2668
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2669
            //    are already initialized.
2670
0
            self.resize_inner(
2671
0
                alloc,
2672
0
                usize::max(new_items, full_capacity + 1),
2673
0
                hasher,
2674
0
                fallibility,
2675
0
                layout,
2676
0
            )
2677
        }
2678
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::reserve_rehash_inner::<_>
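The branch above reduces to a small decision rule: rehash in place when at least half of the usable capacity is locked up by DELETED entries, otherwise grow. A sketch with a hypothetical `plan` helper:

```rust
fn plan(items: usize, additional: usize, full_capacity: usize) -> String {
    // Mirrors the checked_add guard above.
    let new_items = match items.checked_add(additional) {
        Some(n) => n,
        None => return "capacity overflow".into(),
    };
    if new_items <= full_capacity / 2 {
        "rehash in place".into()
    } else {
        // Conservatively jump to at least the next size up.
        format!("resize to capacity {}", new_items.max(full_capacity + 1))
    }
}

fn main() {
    // Plenty of tombstone slack: no reallocation needed.
    assert_eq!(plan(3, 1, 28), "rehash in place");
    // Genuinely growing: reallocate.
    assert_eq!(plan(25, 10, 28), "resize to capacity 35");
}
```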
2679
2680
    /// Returns an iterator over the indices of the full buckets in the table.
2681
    ///
2682
    /// # Safety
2683
    ///
2684
    /// Behavior is undefined if any of the following conditions are violated:
2685
    ///
2686
    /// * The caller has to ensure that the `RawTableInner` outlives the
2687
    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2688
    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2689
    ///   `full_buckets_indices` method unsafe.
2690
    ///
2691
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2692
    #[inline(always)]
2693
0
    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2694
0
        // SAFETY:
2695
0
        // 1. Since the caller of this function ensures that the control bytes
2696
0
        //    are properly initialized and `self.ctrl(0)` points to the start
2697
0
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2698
0
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2699
0
        //    control bytes.
2700
0
        // 2. The value of `items` is equal to the amount of data (values) added
2701
0
        //    to the table.
2702
0
        //
2703
0
        //                         `ctrl` points here (to the start
2704
0
        //                         of the first control byte `CT0`)
2705
0
        //                          ∨
2706
0
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2707
0
        //                           \________  ________/
2708
0
        //                                    \/
2709
0
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2710
0
        //
2711
0
        // where: T0...T_n  - our stored data;
2712
0
        //        CT0...CT_n - control bytes or metadata for `data`.
2713
0
        let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::<u8>());
2714
0
2715
0
        FullBucketsIndices {
2716
0
            // Load the first group
2717
0
            // SAFETY: See explanation above.
2718
0
            current_group: Group::load_aligned(ctrl.as_ptr().cast())
2719
0
                .match_full()
2720
0
                .into_iter(),
2721
0
            group_first_index: 0,
2722
0
            ctrl,
2723
0
            items: self.items,
2724
0
        }
2725
0
    }
2726
2727
    /// Allocates a new table of a different size and moves the contents of the
2728
    /// current table into it.
2729
    ///
2730
    /// This uses dynamic dispatch to reduce the amount of
2731
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2732
    ///
2733
    /// # Safety
2734
    ///
2735
    /// If any of the following conditions are violated, the result is
2736
    /// [`undefined behavior`]:
2737
    ///
2738
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2739
    ///   to allocate this table;
2740
    ///
2741
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2742
    ///   used to allocate this table;
2743
    ///
2744
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2745
    ///
2746
    /// The caller of this function must ensure that `capacity >= self.items`
2747
    /// otherwise:
2748
    ///
2749
    /// * If `self.items != 0`, calling this function with `capacity == 0`
2750
    ///   results in [`undefined behavior`].
2751
    ///
2752
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
2753
    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2754
    ///   results in [`undefined behavior`].
2755
    ///
2756
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
2757
    ///   `self.items > capacity_to_buckets(capacity)` calling this function
2758
    ///   will never return (it will go into an infinite loop).
2759
    ///
2760
    /// Note: It is recommended (but not required) that the new table's `capacity`
2761
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
2762
    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
2763
    /// more information.
2764
    ///
2765
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2766
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2767
    #[allow(clippy::inline_always)]
2768
    #[inline(always)]
2769
0
    unsafe fn resize_inner<A>(
2770
0
        &mut self,
2771
0
        alloc: &A,
2772
0
        capacity: usize,
2773
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2774
0
        fallibility: Fallibility,
2775
0
        layout: TableLayout,
2776
0
    ) -> Result<(), TryReserveError>
2777
0
    where
2778
0
        A: Allocator,
2779
0
    {
2780
        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
2781
        // that were used to allocate this table.
2782
0
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
2783
2784
        // SAFETY: We know for sure that RawTableInner will outlive the
2785
        // returned `FullBucketsIndices` iterator, and the caller of this
2786
        // function ensures that the control bytes are properly initialized.
2787
0
        for full_byte_index in self.full_buckets_indices() {
2788
0
            // This may panic.
2789
0
            let hash = hasher(self, full_byte_index);
2790
0
2791
0
            // SAFETY:
2792
0
            // We can use a simpler version of insert() here since:
2793
0
            // 1. There are no DELETED entries.
2794
0
            // 2. We know there is enough space in the table.
2795
0
            // 3. All elements are unique.
2796
0
            // 4. The caller of this function guarantees that `capacity > 0`
2797
0
            //    so `new_table` must already have some allocated memory.
2798
0
            // 5. We set `growth_left` and `items` fields of the new table
2799
0
            //    after the loop.
2800
0
            // 6. We insert into the table, at the returned index, the data
2801
0
            //    matching the given hash immediately after calling this function.
2802
0
            let (new_index, _) = new_table.prepare_insert_slot(hash);
2803
0
2804
0
            // SAFETY:
2805
0
            //
2806
0
            // * `src` is valid for reads of `layout.size` bytes, since the
2807
0
            //   table is alive and the `full_byte_index` is guaranteed to be
2808
0
            //   within bounds (see `FullBucketsIndices::next_impl`);
2809
0
            //
2810
0
            // * `dst` is valid for writes of `layout.size` bytes, since the
2811
0
            //   caller ensures that `table_layout` matches the [`TableLayout`]
2812
0
            //   that was used to allocate old table and we have the `new_index`
2813
0
            //   returned by `prepare_insert_slot`.
2814
0
            //
2815
0
            // * Both `src` and `dst` are properly aligned.
2816
0
            //
2817
0
            // * Both `src` and `dst` point to different region of memory.
2818
0
            ptr::copy_nonoverlapping(
2819
0
                self.bucket_ptr(full_byte_index, layout.size),
2820
0
                new_table.bucket_ptr(new_index, layout.size),
2821
0
                layout.size,
2822
0
            );
2823
0
        }
2824
2825
        // The hash function didn't panic, so we can safely set the
2826
        // `growth_left` and `items` fields of the new table.
2827
0
        new_table.growth_left -= self.items;
2828
0
        new_table.items = self.items;
2829
0
2830
0
        // We successfully copied all elements without panicking. Now replace
2831
0
        // self with the new table. The old table will have its memory freed but
2832
0
        // the items will not be dropped (since they have been moved into the
2833
0
        // new table).
2834
0
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
2835
0
        // that was used to allocate this table.
2836
0
        mem::swap(self, &mut new_table);
2837
0
2838
0
        Ok(())
2839
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::resize_inner::<_>
2840
2841
    /// Rehashes the contents of the table in place (i.e. without changing the
2842
    /// allocation).
2843
    ///
2844
    /// If `hasher` panics then some of the table's contents may be lost.
2845
    ///
2846
    /// This uses dynamic dispatch to reduce the amount of
2847
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2848
    ///
2849
    /// # Safety
2850
    ///
2851
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2852
    ///
2853
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2854
    ///
2855
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2856
    ///   the elements stored in the table.
2857
    ///
2858
    /// * The [`RawTableInner`] has already been allocated;
2859
    ///
2860
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2861
    ///
2862
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2863
    #[allow(clippy::inline_always)]
2864
    #[cfg_attr(feature = "inline-more", inline(always))]
2865
    #[cfg_attr(not(feature = "inline-more"), inline)]
2866
0
    unsafe fn rehash_in_place(
2867
0
        &mut self,
2868
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2869
0
        size_of: usize,
2870
0
        drop: Option<unsafe fn(*mut u8)>,
2871
0
    ) {
2872
0
        // If the hash function panics then properly clean up any elements
2873
0
        // that we haven't rehashed yet. We unfortunately can't preserve the
2874
0
        // elements, since we have lost their hashes and have no way of recovering them
2875
0
        // without risking another panic.
2876
0
        self.prepare_rehash_in_place();
2877
0
2878
0
        let mut guard = guard(self, move |self_| {
2879
0
            if let Some(drop) = drop {
2880
0
                for i in 0..self_.buckets() {
2881
0
                    if *self_.ctrl(i) == Tag::DELETED {
2882
0
                        self_.set_ctrl(i, Tag::EMPTY);
2883
0
                        drop(self_.bucket_ptr(i, size_of));
2884
0
                        self_.items -= 1;
2885
0
                    }
2886
                }
2887
0
            }
2888
0
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
2889
0
        });
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place::{closure#0}
2890
2891
        // At this point, DELETED elements are elements that we haven't
2892
        // rehashed yet. Find them and re-insert them at their ideal
2893
        // position.
2894
0
        'outer: for i in 0..guard.buckets() {
2895
0
            if *guard.ctrl(i) != Tag::DELETED {
2896
0
                continue;
2897
0
            }
2898
0
2899
0
            let i_p = guard.bucket_ptr(i, size_of);
2900
2901
            'inner: loop {
2902
                // Hash the current item
2903
0
                let hash = hasher(*guard, i);
2904
0
2905
0
                // Search for a suitable place to put it
2906
0
                //
2907
0
                // SAFETY: Caller of this function ensures that the control bytes
2908
0
                // are properly initialized.
2909
0
                let new_i = guard.find_insert_slot(hash).index;
2910
0
2911
0
                // Probing works by scanning through all of the control
2912
0
                // bytes in groups, which may not be aligned to the group
2913
0
                // size. If both the new and old position fall within the
2914
0
                // same unaligned group, then there is no benefit in moving
2915
0
                // it and we can just continue to the next item.
2916
0
                if likely(guard.is_in_same_group(i, new_i, hash)) {
2917
0
                    guard.set_ctrl_hash(i, hash);
2918
0
                    continue 'outer;
2919
0
                }
2920
0
2921
0
                let new_i_p = guard.bucket_ptr(new_i, size_of);
2922
0
2923
0
                // We are moving the current item to a new position. Write
2924
0
                // our H2 to the control byte of the new position.
2925
0
                let prev_ctrl = guard.replace_ctrl_hash(new_i, hash);
2926
0
                if prev_ctrl == Tag::EMPTY {
2927
0
                    guard.set_ctrl(i, Tag::EMPTY);
2928
0
                    // If the target slot is empty, simply move the current
2929
0
                    // element into the new slot and clear the old control
2930
0
                    // byte.
2931
0
                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
2932
0
                    continue 'outer;
2933
                } else {
2934
                    // If the target slot is occupied, swap the two elements
2935
                    // and then continue processing the element that we just
2936
                    // swapped into the old slot.
2937
0
                    debug_assert_eq!(prev_ctrl, Tag::DELETED);
2938
0
                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
2939
0
                    continue 'inner;
2940
                }
2941
            }
2942
        }
2943
2944
0
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
2945
0
2946
0
        mem::forget(guard);
2947
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::rehash_in_place
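A much-simplified, safe analog of the move/swap loop may make the algorithm easier to follow. It uses linear probing over parallel arrays instead of group probing, and a plain same-slot check instead of `is_in_same_group`; the `Ctrl` states mirror EMPTY, DELETED (here `NeedsRehash`) and full.

```rust
#[derive(Clone, Copy, PartialEq)]
enum Ctrl { Empty, NeedsRehash, Full }

fn rehash_in_place(ctrl: &mut [Ctrl], data: &mut [u64]) {
    let mask = ctrl.len() - 1; // length is a power of two
    'outer: for i in 0..ctrl.len() {
        if ctrl[i] != Ctrl::NeedsRehash {
            continue;
        }
        'inner: loop {
            // find_insert_slot: first non-full slot from the home position
            // (here the "hash" is just the value itself).
            let mut new_i = (data[i] as usize) & mask;
            while ctrl[new_i] == Ctrl::Full {
                new_i = (new_i + 1) & mask;
            }
            if new_i == i {
                ctrl[i] = Ctrl::Full; // already in an acceptable slot
                continue 'outer;
            }
            if ctrl[new_i] == Ctrl::Empty {
                // Move into the empty slot and clear the old one.
                ctrl[new_i] = Ctrl::Full;
                ctrl[i] = Ctrl::Empty;
                data[new_i] = data[i];
                continue 'outer;
            }
            // Target still needs rehashing: swap, then keep processing the
            // element that just landed in slot `i`.
            ctrl[new_i] = Ctrl::Full;
            data.swap(i, new_i);
            continue 'inner;
        }
    }
}

fn main() {
    // Four slots; 6 wants slot 2 (6 & 3), 1 wants slot 1.
    let mut ctrl = [Ctrl::NeedsRehash, Ctrl::NeedsRehash, Ctrl::Empty, Ctrl::Empty];
    let mut data = [6u64, 1, 0, 0];
    rehash_in_place(&mut ctrl, &mut data);
    assert_eq!((data[1], data[2]), (1, 6));
}
```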
2948
2949
    /// Deallocates the table without dropping any entries.
2950
    ///
2951
    /// # Note
2952
    ///
2953
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
2954
    /// otherwise it can lead to a memory leak. Calling this function also automatically
2955
    /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and makes invalid
2956
    /// (dangling) the `ctrl` field of the table.
2957
    ///
2958
    /// # Safety
2959
    ///
2960
    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
2961
    ///
2962
    /// * The [`RawTableInner`] has already been allocated;
2963
    ///
2964
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2965
    ///   to allocate this table.
2966
    ///
2967
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
2968
    ///   to allocate this table.
2969
    ///
2970
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
2971
    ///
2972
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2973
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
2974
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
2975
    #[inline]
2976
0
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
2977
0
    where
2978
0
        A: Allocator,
2979
0
    {
2980
0
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
2981
0
        // method.
2982
0
        let (ptr, layout) = self.allocation_info(table_layout);
2983
0
        alloc.deallocate(ptr, layout);
2984
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<hashbrown::raw::alloc::inner::Global>
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::free_buckets::<_>
2985
2986
    /// Returns a pointer to the allocated memory and the layout that was used to
2987
    /// allocate the table.
2988
    ///
2989
    /// # Safety
2990
    ///
2991
    /// Caller of this function must observe the following safety rules:
2992
    ///
2993
    /// * The [`RawTableInner`] has already been allocated, otherwise
2994
    ///   calling this function results in [`undefined behavior`]
2995
    ///
2996
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
2997
    ///   that was used to allocate this table. Failure to comply with this condition
2998
    ///   may result in [`undefined behavior`].
2999
    ///
3000
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3001
    ///
3002
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3003
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3004
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3005
    #[inline]
3006
0
    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3007
0
        debug_assert!(
3008
0
            !self.is_empty_singleton(),
3009
0
            "this function can only be called on non-empty tables"
3010
        );
3011
3012
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3013
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3014
0
            Some(lco) => lco,
3015
0
            None => unsafe { hint::unreachable_unchecked() },
3016
        };
3017
0
        (
3018
0
            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3019
0
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3020
0
            layout,
3021
0
        )
3022
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
Unexecuted instantiation: <hashbrown::raw::RawTableInner>::allocation_info
3023
3024
    /// Returns the total amount of memory allocated internally by the hash
3025
    /// table, in bytes.
3026
    ///
3027
    /// The returned number is informational only. It is intended to be
3028
    /// primarily used for memory profiling.
3029
    ///
3030
    /// # Safety
3031
    ///
3032
    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3033
    /// that was used to allocate this table. Failure to comply with this condition
3034
    /// may result in [`undefined behavior`].
3035
    ///
3036
    ///
3037
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3038
    #[inline]
3039
0
    unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize {
3040
0
        if self.is_empty_singleton() {
3041
0
            0
3042
        } else {
3043
            // SAFETY:
3044
            // 1. We have checked that our table is allocated.
3045
            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3046
            // that was used to allocate this table.
3047
0
            unsafe { self.allocation_info(table_layout).1.size() }
3048
        }
3049
0
    }
3050
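    /// Editor's sketch (not part of hashbrown): how a memory profiler might
    /// aggregate the informational number above across several tables. The
    /// usual contract still applies: `table_layout` must be the layout each
    /// of these tables was allocated with.
    #[allow(dead_code)]
    unsafe fn profile_total_bytes_sketch(tables: &[RawTableInner], table_layout: TableLayout) -> usize {
        let mut total = 0;
        for table in tables {
            // SAFETY: deferred to our caller, exactly as for
            // `allocation_size_or_zero` itself.
            total += unsafe { table.allocation_size_or_zero(table_layout) };
        }
        total
    }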
3051
    /// Marks all table buckets as empty without dropping their contents.
3052
    #[inline]
3053
0
    fn clear_no_drop(&mut self) {
3054
0
        if !self.is_empty_singleton() {
3055
0
            self.ctrl_slice().fill_empty();
3056
0
        }
3057
0
        self.items = 0;
3058
0
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3059
0
    }
3060
3061
    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket is no
3062
    /// longer treated as full, decreases the `items` count of the table and, if possible,
3063
    /// increases `self.growth_left`.
3064
    ///
3065
    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3066
    /// does not make any changes to the `data` parts of the table. The caller of this
3067
    /// function must take care to properly drop the `data`, otherwise calling this
3068
    /// function may result in a memory leak.
3069
    ///
3070
    /// # Safety
3071
    ///
3072
    /// You must observe the following safety rules when calling this function:
3073
    ///
3074
    /// * The [`RawTableInner`] has already been allocated;
3075
    ///
3076
    /// * The control byte at the given position must be a full control byte;
3077
    ///
3078
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3079
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3080
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3081
    ///
3082
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3083
    ///
3084
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3085
    /// functions is likely to result in [`undefined behavior`] due to subtraction overflow
3086
    /// (`self.items -= 1` overflows when `self.items == 0`).
3087
    ///
3088
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
3089
    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
3090
    ///
3091
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3092
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3093
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3094
    #[inline]
3095
0
    unsafe fn erase(&mut self, index: usize) {
3096
0
        debug_assert!(self.is_bucket_full(index));
3097
3098
        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3099
        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3100
0
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3101
0
        // SAFETY:
3102
0
        // - The caller must uphold the safety contract for `erase` method;
3103
0
        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3104
0
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3105
0
        let empty_after = Group::load(self.ctrl(index)).match_empty();
3106
3107
        // Inserting and searching in the map is performed by two key functions:
3108
        //
3109
        // - The `find_insert_slot` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED`
3110
        //   slot in a group to be able to insert. If it doesn't find a `Tag::EMPTY` or `Tag::DELETED`
3111
        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3112
        //   and so on until it has gone through all the groups in the control bytes.
3113
        //
3114
        // - The `find_inner` function that looks for the index of the desired element by looking
3115
        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3116
        //   there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_slot`
3117
        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3118
        //   jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY`
3119
        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
3120
        //   finds the desired element or hits an `Tag::EMPTY` slot/byte.
3121
        //
3122
        // Accordingly, this leads to two consequences:
3123
        //
3124
        // - The map must have `Tag::EMPTY` slots (bytes);
3125
        //
3126
        // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner`
3127
        //   function may stumble upon a `Tag::EMPTY` byte before finding the desired element and stop
3128
        //   searching.
3129
        //
3130
        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3131
        // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes
3132
        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3133
        // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3134
        // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3135
        // upon a `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well.
3136
        //
3137
        // Finally, for tables smaller than the group width (self.buckets() < Group::WIDTH) we have
3138
        // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, so both loads
3139
        // above read the same group and, given all of the above, such tables cannot have `Tag::DELETED` bytes.
3140
        //
3141
        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3142
        // `trailing_zeros` refers to the bytes at the beginning of a group.
3143
0
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3144
0
            Tag::DELETED
3145
        } else {
3146
0
            self.growth_left += 1;
3147
0
            Tag::EMPTY
3148
        };
3149
        // SAFETY: the caller must uphold the safety contract for `erase` method.
3150
0
        self.set_ctrl(index, ctrl);
3151
0
        self.items -= 1;
3152
0
    }
3153
}
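// Editor's sketch of the tombstone rule in `erase` above (plain run lengths
// stand in for the two bitmask popcounts): if the contiguous run of
// non-empty slots around the erased index spans at least one whole group, a
// probe sequence may already have skipped past this group, so the slot must
// stay visible as a tombstone.
#[allow(dead_code)]
fn tag_for_erased_slot_sketch(full_run_before: usize, full_run_after: usize) -> Tag {
    if full_run_before + full_run_after >= Group::WIDTH {
        // Some probe could have crossed this group without seeing an empty
        // byte; keep the slot visible to `find_inner` as DELETED.
        Tag::DELETED
    } else {
        // Every probe crossing this slot also sees a `Tag::EMPTY` byte in
        // the same group and stops, so the slot can be reused outright.
        Tag::EMPTY
    }
}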
3154
3155
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3156
0
    fn clone(&self) -> Self {
3157
0
        if self.table.is_empty_singleton() {
3158
0
            Self::new_in(self.alloc.clone())
3159
        } else {
3160
            unsafe {
3161
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3162
                //
3163
                // SAFETY: This is safe as we are taking the size of an already allocated table
3164
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
3165
                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3166
0
                let mut new_table = match Self::new_uninitialized(
3167
0
                    self.alloc.clone(),
3168
0
                    self.table.buckets(),
3169
0
                    Fallibility::Infallible,
3170
0
                ) {
3171
0
                    Ok(table) => table,
3172
0
                    Err(_) => hint::unreachable_unchecked(),
3173
                };
3174
3175
                // Cloning elements may fail (the clone function may panic). But we don't
3176
                // need to worry about uninitialized control bits, since:
3177
                // 1. The number of items (elements) in the table is zero, which means that
3178
                //    the control bits will not be read by Drop function.
3179
                // 2. The `clone_from_spec` method will first copy all control bits from
3180
                //    `self` (thus initializing them). But this will not affect the `Drop`
3181
                //    function, since the `clone_from_spec` function sets `items` only after
3182
                //    successfully cloning all elements.
3183
0
                new_table.clone_from_spec(self);
3184
0
                new_table
3185
            }
3186
        }
3187
0
    }
3188
3189
0
    fn clone_from(&mut self, source: &Self) {
3190
0
        if source.table.is_empty_singleton() {
3191
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3192
0
            unsafe {
3193
0
                // SAFETY:
3194
0
                // 1. We call the function only once;
3195
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3196
0
                //    and [`TableLayout`] that were used to allocate this table.
3197
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
3198
0
                //    because we have replaced the inner table with a new one.
3199
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3200
0
            }
3201
        } else {
3202
            unsafe {
3203
                // Make sure that if any panic occurs, we clear the table and
3204
                // leave it in an empty state.
3205
0
                let mut self_ = guard(self, |self_| {
3206
0
                    self_.clear_no_drop();
3207
0
                });
3208
0
3209
0
                // First, drop all our elements without clearing the control
3210
0
                // bytes. If this panics then the scope guard will clear the
3211
0
                // table, leaking any elements that were not dropped yet.
3212
0
                //
3213
0
                // This leak is unavoidable: we can't try dropping more elements
3214
0
                // since this could lead to another panic and abort the process.
3215
0
                //
3216
0
                // SAFETY: If something goes wrong we clear our table right after
3217
0
                // dropping the elements, so there is no double drop, since `items`
3218
0
                // will be equal to zero.
3219
0
                self_.table.drop_elements::<T>();
3220
0
3221
0
                // If necessary, resize our table to match the source.
3222
0
                if self_.buckets() != source.buckets() {
3223
0
                    let new_inner = match RawTableInner::new_uninitialized(
3224
0
                        &self_.alloc,
3225
0
                        Self::TABLE_LAYOUT,
3226
0
                        source.buckets(),
3227
0
                        Fallibility::Infallible,
3228
0
                    ) {
3229
0
                        Ok(table) => table,
3230
0
                        Err(_) => hint::unreachable_unchecked(),
3231
                    };
3232
                    // Replace the old inner with the new uninitialized one. This is fine, since if something
3233
                    // goes wrong the `ScopeGuard` will initialize all control bytes and leave an empty table.
3234
0
                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3235
0
                    if !old_inner.is_empty_singleton() {
3236
0
                        // SAFETY:
3237
0
                        // 1. We have checked that our table is allocated.
3238
0
                        // 2. We know for sure that `alloc` and `table_layout` matches
3239
0
                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3240
0
                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3241
0
                    }
3242
0
                }
3243
3244
                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3245
                // inside the `clone_from_impl` function will take care of that, dropping all
3246
                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3247
0
                self_.clone_from_spec(source);
3248
0
3249
0
                // Disarm the scope guard if cloning was successful.
3250
0
                ScopeGuard::into_inner(self_);
3251
            }
3252
        }
3253
0
    }
3254
}
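// Editor's usage sketch (not from the hashbrown sources): `clone_from` above
// is preferable to `*dst = src.clone()` because, when the bucket counts
// already match, it reuses the destination's existing allocation instead of
// allocating a fresh table.
#[cfg(test)]
#[test]
fn clone_from_reuses_allocation_sketch() {
    let mut src = RawTable::<u64>::with_capacity(64);
    for i in 0..32 {
        src.insert(i, i, |x: &u64| *x);
    }
    let mut dst = RawTable::<u64>::with_capacity(64);
    // Same bucket count on both sides, so this takes the reuse path.
    dst.clone_from(&src);
    assert_eq!(dst.len(), src.len());
}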
3255
3256
/// Specialization of `clone_from` for `Copy` types
3257
trait RawTableClone {
3258
    unsafe fn clone_from_spec(&mut self, source: &Self);
3259
}
3260
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3261
    default_fn! {
3262
        #[cfg_attr(feature = "inline-more", inline)]
3263
0
        unsafe fn clone_from_spec(&mut self, source: &Self) {
3264
0
            self.clone_from_impl(source);
3265
0
        }
3266
    }
3267
}
3268
#[cfg(feature = "nightly")]
3269
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3270
    #[cfg_attr(feature = "inline-more", inline)]
3271
    unsafe fn clone_from_spec(&mut self, source: &Self) {
3272
        source
3273
            .table
3274
            .ctrl(0)
3275
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3276
        source
3277
            .data_start()
3278
            .as_ptr()
3279
            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3280
3281
        self.table.items = source.table.items;
3282
        self.table.growth_left = source.table.growth_left;
3283
    }
3284
}
3285
3286
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3287
    /// Common code for `clone` and `clone_from`. Assumes:
3288
    /// - `self.buckets() == source.buckets()`.
3289
    /// - Any existing elements have been dropped.
3290
    /// - The control bytes are not initialized yet.
3291
    #[cfg_attr(feature = "inline-more", inline)]
3292
0
    unsafe fn clone_from_impl(&mut self, source: &Self) {
3293
0
        // Copy the control bytes unchanged. We do this in a single pass
3294
0
        source
3295
0
            .table
3296
0
            .ctrl(0)
3297
0
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3298
0
3299
0
        // The cloning of elements may panic, in which case we need
3300
0
        // to make sure we drop only the elements that have been
3301
0
        // cloned so far.
3302
0
        let mut guard = guard((0, &mut *self), |(index, self_)| {
3303
0
            if T::NEEDS_DROP {
3304
0
                for i in 0..*index {
3305
0
                    if self_.is_bucket_full(i) {
3306
0
                        self_.bucket(i).drop();
3307
0
                    }
3308
                }
3309
0
            }
3310
0
        });
3311
3312
0
        for from in source.iter() {
3313
0
            let index = source.bucket_index(&from);
3314
0
            let to = guard.1.bucket(index);
3315
0
            to.write(from.as_ref().clone());
3316
0
3317
0
            // Update the index in case we need to unwind.
3318
0
            guard.0 = index + 1;
3319
0
        }
3320
3321
        // Successfully cloned all items, no need to clean up.
3322
0
        mem::forget(guard);
3323
0
3324
0
        self.table.items = source.table.items;
3325
0
        self.table.growth_left = source.table.growth_left;
3326
0
    }
3327
}
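// Editor's reduced sketch of the panic-guard pattern in `clone_from_impl`
// above (illustrative only, slices instead of buckets): clone elements into
// uninitialized storage while tracking how many were written, so that a
// panicking `clone` drops exactly the initialized prefix and nothing else.
#[allow(dead_code)]
fn clone_into_uninit_sketch<T: Clone>(src: &[T], dst: &mut [mem::MaybeUninit<T>]) {
    struct PrefixGuard<'a, T> {
        dst: &'a mut [mem::MaybeUninit<T>],
        init: usize,
    }
    impl<T> Drop for PrefixGuard<'_, T> {
        fn drop(&mut self) {
            for slot in &mut self.dst[..self.init] {
                // SAFETY: exactly `init` leading slots were written below.
                unsafe { slot.assume_init_drop() };
            }
        }
    }
    let mut guard = PrefixGuard { dst, init: 0 };
    for (i, item) in src.iter().enumerate() {
        guard.dst[i].write(item.clone()); // may panic mid-way
        guard.init = i + 1; // advance only after a successful write
    }
    // Success: disarm the guard, the caller now owns the clones.
    mem::forget(guard);
}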
3328
3329
impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3330
    #[inline]
3331
0
    fn default() -> Self {
3332
0
        Self::new_in(Default::default())
3333
0
    }
3334
}
3335
3336
#[cfg(feature = "nightly")]
3337
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3338
    #[cfg_attr(feature = "inline-more", inline)]
3339
    fn drop(&mut self) {
3340
        unsafe {
3341
            // SAFETY:
3342
            // 1. We call the function only once;
3343
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3344
            //    and [`TableLayout`] that were used to allocate this table.
3345
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3346
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3347
            //    so there won't be any table left in an inconsistent state.
3348
            self.table
3349
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3350
        }
3351
    }
3352
}
3353
#[cfg(not(feature = "nightly"))]
3354
impl<T, A: Allocator> Drop for RawTable<T, A> {
3355
    #[cfg_attr(feature = "inline-more", inline)]
3356
0
    fn drop(&mut self) {
3357
0
        unsafe {
3358
0
            // SAFETY:
3359
0
            // 1. We call the function only once;
3360
0
            // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3361
0
            //    and [`TableLayout`] that were used to allocate this table.
3362
0
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3363
0
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3364
0
            //    so there won't be any table left in an inconsistent state.
3365
0
            self.table
3366
0
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3367
0
        }
3368
0
    }
Unexecuted instantiation: <hashbrown::raw::RawTable<usize> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::RawTable<_, _> as core::ops::drop::Drop>::drop
3369
}
3370
3371
impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3372
    type Item = T;
3373
    type IntoIter = RawIntoIter<T, A>;
3374
3375
    #[cfg_attr(feature = "inline-more", inline)]
3376
0
    fn into_iter(self) -> RawIntoIter<T, A> {
3377
0
        unsafe {
3378
0
            let iter = self.iter();
3379
0
            self.into_iter_from(iter)
3380
0
        }
3381
0
    }
3382
}
3383
3384
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3385
/// not track an item count.
3386
pub(crate) struct RawIterRange<T> {
3387
    // Mask of full buckets in the current group. Bits are cleared from this
3388
    // mask as each element is processed.
3389
    current_group: BitMaskIter,
3390
3391
    // Pointer to the buckets for the current group.
3392
    data: Bucket<T>,
3393
3394
    // Pointer to the next group of control bytes.
3395
    // Must be aligned to the group size.
3396
    next_ctrl: *const u8,
3397
3398
    // Pointer one past the last control byte of this range.
3399
    end: *const u8,
3400
}
3401
3402
impl<T> RawIterRange<T> {
3403
    /// Returns a `RawIterRange` covering a subset of a table.
3404
    ///
3405
    /// # Safety
3406
    ///
3407
    /// If any of the following conditions are violated, the result is
3408
    /// [`undefined behavior`]:
3409
    ///
3410
    /// * `ctrl` must be [valid] for reads, i.e. the table outlives the `RawIterRange`;
3411
    ///
3412
    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
3413
    ///
3414
    /// * `ctrl` must point to the array of properly initialized control bytes;
3415
    ///
3416
    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3417
    ///
3418
    /// * the value of `len` must be less than or equal to the number of table buckets,
3419
    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3420
    ///   must be positive.
3421
    ///
3422
    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3423
    ///   byte past the end of the same [allocated table].
3424
    ///
3425
    /// * The `len` must be a power of two.
3426
    ///
3427
    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3428
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3429
    #[cfg_attr(feature = "inline-more", inline)]
3430
0
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3431
0
        debug_assert_ne!(len, 0);
3432
0
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3433
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3434
0
        let end = ctrl.add(len);
3435
0
3436
0
        // Load the first group and advance ctrl to point to the next group
3437
0
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3438
0
        let current_group = Group::load_aligned(ctrl.cast()).match_full();
3439
0
        let next_ctrl = ctrl.add(Group::WIDTH);
3440
0
3441
0
        Self {
3442
0
            current_group: current_group.into_iter(),
3443
0
            data,
3444
0
            next_ctrl,
3445
0
            end,
3446
0
        }
3447
0
    }
3448
3449
    /// Splits a `RawIterRange` into two halves.
3450
    ///
3451
    /// The second half is `None` if the remaining range is smaller than or
3452
    /// equal to the group width.
3453
    #[cfg_attr(feature = "inline-more", inline)]
3454
    #[cfg(feature = "rayon")]
3455
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3456
        unsafe {
3457
            if self.end <= self.next_ctrl {
3458
                // Nothing to split if the group that we are currently processing
3459
                // is the last one.
3460
                (self, None)
3461
            } else {
3462
                // len is the remaining number of elements after the group that
3463
                // we are currently processing. It must be a multiple of the
3464
                // group size (small tables are caught by the check above).
3465
                let len = offset_from(self.end, self.next_ctrl);
3466
                debug_assert_eq!(len % Group::WIDTH, 0);
3467
3468
                // Split the remaining elements into two halves, but round the
3469
                // midpoint down in case there is an odd number of groups
3470
                // remaining. This ensures that:
3471
                // - The tail is at least 1 group long.
3472
                // - The split is roughly even considering we still have the
3473
                //   current group to process.
3474
                let mid = (len / 2) & !(Group::WIDTH - 1);
3475
3476
                let tail = Self::new(
3477
                    self.next_ctrl.add(mid),
3478
                    self.data.next_n(Group::WIDTH).next_n(mid),
3479
                    len - mid,
3480
                );
3481
                debug_assert_eq!(
3482
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3483
                    tail.data.ptr
3484
                );
3485
                debug_assert_eq!(self.end, tail.end);
3486
                self.end = self.next_ctrl.add(mid);
3487
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3488
                (self, Some(tail))
3489
            }
3490
        }
3491
    }
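    /// Editor's sketch (not part of hashbrown): the midpoint rounding used
    /// by `split` above. Rounding `len / 2` down to a multiple of the group
    /// width keeps both halves aligned to whole groups; e.g. with a width of
    /// 16, a remaining length of 112 splits as 48 + 64 rather than 56 + 56.
    #[allow(dead_code)]
    fn group_aligned_mid_sketch(len: usize) -> usize {
        debug_assert_eq!(len % Group::WIDTH, 0);
        (len / 2) & !(Group::WIDTH - 1)
    }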
3492
3493
    /// # Safety
3494
    /// If `DO_CHECK_PTR_RANGE` is false, caller must ensure that we never try to iterate
3495
    /// after yielding all elements.
3496
    #[cfg_attr(feature = "inline-more", inline)]
3497
0
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3498
        loop {
3499
0
            if let Some(index) = self.current_group.next() {
3500
0
                return Some(self.data.next_n(index));
3501
0
            }
3502
0
3503
0
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3504
0
                return None;
3505
0
            }
3506
0
3507
0
            // We might read past self.end up to the next group boundary,
3508
0
            // but this is fine because it only occurs on tables smaller
3509
0
            // than the group size where the trailing control bytes are all
3510
0
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3511
0
            // to the group size (since tables are power-of-two sized).
3512
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3513
0
                .match_full()
3514
0
                .into_iter();
3515
0
            self.data = self.data.next_n(Group::WIDTH);
3516
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3517
        }
3518
0
    }
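    /// Editor's portable sketch of the scan in `next_impl` above (the tag
    /// values are assumed stand-ins, and plain byte compares replace the
    /// SIMD `match_full`): walk the control bytes one group at a time, turn
    /// each group into a bitmask of full slots, and drain the mask bit by
    /// bit exactly like `BitMaskIter` does.
    #[allow(dead_code)]
    fn full_indices_sketch(ctrl: &[u8]) -> ::alloc::vec::Vec<usize> {
        const EMPTY: u8 = 0xFF; // stand-ins for `Tag::EMPTY` / `Tag::DELETED`
        const DELETED: u8 = 0x80;
        let mut out = ::alloc::vec::Vec::new();
        for (group_index, group) in ctrl.chunks(Group::WIDTH).enumerate() {
            // Build the "match_full" bitmask: one bit per full slot.
            let mut mask: u32 = 0;
            for (bit, &byte) in group.iter().enumerate() {
                if byte != EMPTY && byte != DELETED {
                    mask |= 1u32 << bit;
                }
            }
            // Drain the mask lowest-bit-first, like `BitMaskIter`.
            while mask != 0 {
                let bit = mask.trailing_zeros() as usize;
                out.push(group_index * Group::WIDTH + bit);
                mask &= mask - 1; // clear the lowest set bit
            }
        }
        out
    }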
3519
3520
    /// Folds every element into an accumulator by applying an operation,
3521
    /// returning the final result.
3522
    ///
3523
    /// `fold_impl()` takes three arguments: the number of items remaining in
3524
    /// the iterator, an initial value, and a closure with two arguments: an
3525
    /// 'accumulator', and an element. The closure returns the value that the
3526
    /// accumulator should have for the next iteration.
3527
    ///
3528
    /// The initial value is the value the accumulator will have on the first call.
3529
    ///
3530
    /// After applying this closure to every element of the iterator, `fold_impl()`
3531
    /// returns the accumulator.
3532
    ///
3533
    /// # Safety
3534
    ///
3535
    /// If any of the following conditions are violated, the result is
3536
    /// [`Undefined Behavior`]:
3537
    ///
3538
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3539
    ///   i.e. the table outlives the `RawIterRange`;
3540
    ///
3541
    /// * The provided `n` value must match the actual number of items
3542
    ///   in the table.
3543
    ///
3544
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3545
    #[allow(clippy::while_let_on_iterator)]
3546
    #[cfg_attr(feature = "inline-more", inline)]
3547
0
    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3548
0
    where
3549
0
        F: FnMut(B, Bucket<T>) -> B,
3550
0
    {
3551
        loop {
3552
0
            while let Some(index) = self.current_group.next() {
3553
                // The returned `index` will always be in the range `0..Group::WIDTH`,
3554
                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3555
0
                debug_assert!(n != 0);
3556
0
                let bucket = self.data.next_n(index);
3557
0
                acc = f(acc, bucket);
3558
0
                n -= 1;
3559
            }
3560
3561
0
            if n == 0 {
3562
0
                return acc;
3563
0
            }
3564
0
3565
0
            // SAFETY: The caller of this function ensures that:
3566
0
            //
3567
0
            // 1. The provided `n` value matches the actual number of items in the table;
3568
0
            // 2. The table is alive and did not moved.
3569
0
            //
3570
0
            // Taking the above into account, we always stay within the bounds, because:
3571
0
            //
3572
0
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3573
0
            //    we will never end up in the given branch, since we should have already
3574
0
            //    yielded all the elements of the table.
3575
0
            //
3576
0
            // 2. For tables larger than the group width. The number of buckets is a
3577
0
            //    power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since
3578
0
            //    `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3579
0
            //    start of the array of control bytes, and never try to iterate after
3580
0
            //    getting all the elements, the last `self.current_group` will read bytes
3581
0
            //    from the `self.buckets() - Group::WIDTH` index. We also know that
3582
0
            //    `self.current_group.next()` will always return indices within the range
3583
0
            //    `0..Group::WIDTH`.
3584
0
            //
3585
0
            //    Knowing all of the above and taking into account that we are synchronizing
3586
0
            //    the `self.data` index with the index we used to read the `self.current_group`,
3587
0
            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3588
0
            //    an index number less than `self.buckets()`.
3589
0
            //
3590
0
            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3591
0
            //    actually be read, since we should have already yielded all the elements of
3592
0
            //    the table.
3593
0
            self.current_group = Group::load_aligned(self.next_ctrl.cast())
3594
0
                .match_full()
3595
0
                .into_iter();
3596
0
            self.data = self.data.next_n(Group::WIDTH);
3597
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3598
        }
3599
0
    }
3600
}
3601
3602
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3603
// in the actual iterator implementations determine the real Send/Sync bounds.
3604
unsafe impl<T> Send for RawIterRange<T> {}
3605
unsafe impl<T> Sync for RawIterRange<T> {}
3606
3607
impl<T> Clone for RawIterRange<T> {
3608
    #[cfg_attr(feature = "inline-more", inline)]
3609
0
    fn clone(&self) -> Self {
3610
0
        Self {
3611
0
            data: self.data.clone(),
3612
0
            next_ctrl: self.next_ctrl,
3613
0
            current_group: self.current_group.clone(),
3614
0
            end: self.end,
3615
0
        }
3616
0
    }
3617
}
3618
3619
impl<T> Iterator for RawIterRange<T> {
3620
    type Item = Bucket<T>;
3621
3622
    #[cfg_attr(feature = "inline-more", inline)]
3623
0
    fn next(&mut self) -> Option<Bucket<T>> {
3624
0
        unsafe {
3625
0
            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true.
3626
0
            self.next_impl::<true>()
3627
0
        }
3628
0
    }
3629
3630
    #[inline]
3631
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3632
        // We don't have an item count, so just guess based on the range size.
3633
0
        let remaining_buckets = if self.end > self.next_ctrl {
3634
0
            unsafe { offset_from(self.end, self.next_ctrl) }
3635
        } else {
3636
0
            0
3637
        };
3638
3639
        // Add a group width to include the group we are currently processing.
3640
0
        (0, Some(Group::WIDTH + remaining_buckets))
3641
0
    }
3642
}
3643
3644
impl<T> FusedIterator for RawIterRange<T> {}
3645
3646
/// Iterator which returns a raw pointer to every full bucket in the table.
3647
///
3648
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3649
/// must observe several rules when using it:
3650
/// - You must not free the hash table while iterating (including via growing/shrinking).
3651
/// - It is fine to erase a bucket that has been yielded by the iterator.
3652
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3653
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3654
/// - It is unspecified whether an element inserted after the iterator was
3655
///   created will be yielded by that iterator (unless `reflect_insert` is called).
3656
/// - The order in which the iterator yields buckets is unspecified and may
3657
///   change in the future.
3658
pub struct RawIter<T> {
3659
    pub(crate) iter: RawIterRange<T>,
3660
    items: usize,
3661
}
3662
3663
impl<T> RawIter<T> {
3664
0
    unsafe fn drop_elements(&mut self) {
3665
0
        if T::NEEDS_DROP && self.items != 0 {
3666
0
            for item in self {
3667
0
                item.drop();
3668
0
            }
3669
0
        }
3670
0
    }
3671
}
3672
3673
impl<T> Clone for RawIter<T> {
3674
    #[cfg_attr(feature = "inline-more", inline)]
3675
0
    fn clone(&self) -> Self {
3676
0
        Self {
3677
0
            iter: self.iter.clone(),
3678
0
            items: self.items,
3679
0
        }
3680
0
    }
3681
}
3682
impl<T> Default for RawIter<T> {
3683
    #[cfg_attr(feature = "inline-more", inline)]
3684
0
    fn default() -> Self {
3685
0
        // SAFETY: Because the table is static, it always outlives the iter.
3686
0
        unsafe { RawTableInner::NEW.iter() }
3687
0
    }
3688
}
3689
3690
impl<T> Iterator for RawIter<T> {
3691
    type Item = Bucket<T>;
3692
3693
    #[cfg_attr(feature = "inline-more", inline)]
3694
0
    fn next(&mut self) -> Option<Bucket<T>> {
3695
0
        // The inner iterator iterates over buckets,
3696
0
        // so it can do unnecessary work if we have already yielded all items.
3697
0
        if self.items == 0 {
3698
0
            return None;
3699
0
        }
3700
0
3701
0
        let nxt = unsafe {
3702
0
            // SAFETY: We check the number of items to yield using the `items` field.
3703
0
            self.iter.next_impl::<false>()
3704
0
        };
3705
0
3706
0
        debug_assert!(nxt.is_some());
3707
0
        self.items -= 1;
3708
0
3709
0
        nxt
3710
0
    }
3711
3712
    #[inline]
3713
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3714
0
        (self.items, Some(self.items))
3715
0
    }
3716
3717
    #[inline]
3718
0
    fn fold<B, F>(self, init: B, f: F) -> B
3719
0
    where
3720
0
        Self: Sized,
3721
0
        F: FnMut(B, Self::Item) -> B,
3722
0
    {
3723
0
        unsafe { self.iter.fold_impl(self.items, init, f) }
3724
0
    }
3725
}
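// Editor's sketch of the counted-iterator trick used by `RawIter` above
// (generic stand-in, not hashbrown code): when an exact element count is
// tracked next to the inner cursor, `next` can stop as soon as the counter
// hits zero and the inner scan never needs its own end-of-range check --
// the same reason `RawIter::next` may call `next_impl::<false>`.
#[allow(dead_code)]
struct CountedSketch<I> {
    inner: I,
    remaining: usize,
}
impl<I: Iterator> Iterator for CountedSketch<I> {
    type Item = I::Item;
    fn next(&mut self) -> Option<I::Item> {
        if self.remaining == 0 {
            return None; // never poll `inner` past the last item
        }
        self.remaining -= 1;
        self.inner.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // An exact count is what makes an `ExactSizeIterator` impl sound.
        (self.remaining, Some(self.remaining))
    }
}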
3726
3727
impl<T> ExactSizeIterator for RawIter<T> {}
3728
impl<T> FusedIterator for RawIter<T> {}
3729
3730
/// Iterator which returns an index of every full bucket in the table.
3731
///
3732
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3733
/// must observe several rules when using it:
3734
/// - You must not free the hash table while iterating (including via growing/shrinking).
3735
/// - It is fine to erase a bucket that has been yielded by the iterator.
3736
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3737
///   result in the iterator yielding index of that bucket.
3738
/// - It is unspecified whether an element inserted after the iterator was
3739
///   created will be yielded by that iterator.
3740
/// - The order in which the iterator yields indices of the buckets is unspecified
3741
///   and may change in the future.
3742
pub(crate) struct FullBucketsIndices {
3743
    // Mask of full buckets in the current group. Bits are cleared from this
3744
    // mask as each element is processed.
3745
    current_group: BitMaskIter,
3746
3747
    // Index of the first control byte of the current group (relative
3748
    // to the start of the control bytes).
3749
    group_first_index: usize,
3750
3751
    // Pointer to the current group of control bytes.
3752
    // Must be aligned to the group size (Group::WIDTH).
3753
    ctrl: NonNull<u8>,
3754
3755
    // Number of elements in the table.
3756
    items: usize,
3757
}
3758
3759
impl FullBucketsIndices {
3760
    /// Advances the iterator and returns the next value.
3761
    ///
3762
    /// # Safety
3763
    ///
3764
    /// If any of the following conditions are violated, the result is
3765
    /// [`Undefined Behavior`]:
3766
    ///
3767
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3768
    ///   i.e. the table outlives the `FullBucketsIndices`;
3769
    ///
3770
    /// * It never tries to iterate after getting all elements.
3771
    ///
3772
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3773
    #[inline(always)]
3774
0
    unsafe fn next_impl(&mut self) -> Option<usize> {
3775
        loop {
3776
0
            if let Some(index) = self.current_group.next() {
3777
                // The returned `self.group_first_index + index` will always
3778
                // be in the range `0..self.buckets()`. See explanation below.
3779
0
                return Some(self.group_first_index + index);
3780
0
            }
3781
0
3782
0
            // SAFETY: The caller of this function ensures that:
3783
0
            //
3784
0
            // 1. It never tries to iterate after getting all the elements;
3785
0
            // 2. The table is alive and did not moved;
3786
0
            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
3787
0
            //
3788
0
            // Taking the above into account, we always stay within the bounds, because:
3789
0
            //
3790
0
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3791
0
            //    we will never end up in the given branch, since we should have already
3792
0
            //    yielded all the elements of the table.
3793
0
            //
3794
0
            // 2. For tables larger than the group width. The number of buckets is a
3795
0
            //    power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since
3796
0
            //    `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3797
0
            //    the start of the array of control bytes, and never try to iterate after
3798
0
            //    getting all the elements, the last `self.ctrl` will be equal to
3799
0
            //    the `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
3800
0
            //    will always contains indices within the range `0..Group::WIDTH`,
3801
0
            //    and subsequent `self.group_first_index + index` will always return a
3802
0
            //    number less than `self.buckets()`.
3803
0
            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
3804
0
3805
0
            // SAFETY: See explanation above.
3806
0
            self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast())
3807
0
                .match_full()
3808
0
                .into_iter();
3809
0
            self.group_first_index += Group::WIDTH;
3810
        }
3811
0
    }
3812
}
3813
3814
impl Iterator for FullBucketsIndices {
3815
    type Item = usize;
3816
3817
    /// Advances the iterator and returns the next value. It is up to
3818
    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
3819
    /// because we cannot make the `next` method unsafe.
3820
    #[inline(always)]
3821
0
    fn next(&mut self) -> Option<usize> {
3822
0
        // Return if we already yielded all items.
3823
0
        if self.items == 0 {
3824
0
            return None;
3825
0
        }
3826
0
3827
0
        let nxt = unsafe {
3828
0
            // SAFETY:
3829
0
            // 1. We check the number of items to yield using the `items` field.
3830
0
            // 2. The caller ensures that the table is alive and has not moved.
3831
0
            self.next_impl()
3832
0
        };
3833
0
3834
0
        debug_assert!(nxt.is_some());
3835
0
        self.items -= 1;
3836
0
3837
0
        nxt
3838
0
    }
3839
3840
    #[inline(always)]
3841
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3842
0
        (self.items, Some(self.items))
3843
0
    }
3844
}
3845
3846
impl ExactSizeIterator for FullBucketsIndices {}
3847
impl FusedIterator for FullBucketsIndices {}
3848
3849
/// Iterator which consumes a table and returns elements.
3850
pub struct RawIntoIter<T, A: Allocator = Global> {
3851
    iter: RawIter<T>,
3852
    allocation: Option<(NonNull<u8>, Layout, A)>,
3853
    marker: PhantomData<T>,
3854
}
3855
3856
impl<T, A: Allocator> RawIntoIter<T, A> {
3857
    #[cfg_attr(feature = "inline-more", inline)]
3858
0
    pub fn iter(&self) -> RawIter<T> {
3859
0
        self.iter.clone()
3860
0
    }
3861
}
3862
3863
unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
3864
where
3865
    T: Send,
3866
    A: Send,
3867
{
3868
}
3869
unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
3870
where
3871
    T: Sync,
3872
    A: Sync,
3873
{
3874
}
3875
3876
#[cfg(feature = "nightly")]
3877
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
3878
    #[cfg_attr(feature = "inline-more", inline)]
3879
    fn drop(&mut self) {
3880
        unsafe {
3881
            // Drop all remaining elements
3882
            self.iter.drop_elements();
3883
3884
            // Free the table
3885
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3886
                alloc.deallocate(ptr, layout);
3887
            }
3888
        }
3889
    }
3890
}
3891
#[cfg(not(feature = "nightly"))]
3892
impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
3893
    #[cfg_attr(feature = "inline-more", inline)]
3894
0
    fn drop(&mut self) {
3895
0
        unsafe {
3896
0
            // Drop all remaining elements
3897
0
            self.iter.drop_elements();
3898
3899
            // Free the table
3900
0
            if let Some((ptr, layout, ref alloc)) = self.allocation {
3901
0
                alloc.deallocate(ptr, layout);
3902
0
            }
3903
        }
3904
0
    }
3905
}
3906
3907
impl<T, A: Allocator> Default for RawIntoIter<T, A> {
3908
0
    fn default() -> Self {
3909
0
        Self {
3910
0
            iter: Default::default(),
3911
0
            allocation: None,
3912
0
            marker: PhantomData,
3913
0
        }
3914
0
    }
3915
}
3916
impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
3917
    type Item = T;
3918
3919
    #[cfg_attr(feature = "inline-more", inline)]
3920
0
    fn next(&mut self) -> Option<T> {
3921
0
        unsafe { Some(self.iter.next()?.read()) }
3922
0
    }
3923
3924
    #[inline]
3925
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3926
0
        self.iter.size_hint()
3927
0
    }
3928
}
3929
3930
impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
3931
impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
3932
3933
/// Iterator which consumes elements without freeing the table storage.
3934
pub struct RawDrain<'a, T, A: Allocator = Global> {
3935
    iter: RawIter<T>,
3936
3937
    // The table is moved into the iterator for the duration of the drain. This
3938
    // ensures that an empty table is left if the drain iterator is leaked
3939
    // without dropping.
3940
    table: RawTableInner,
3941
    orig_table: NonNull<RawTableInner>,
3942
3943
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
3944
    // covariant over T.
3945
    marker: PhantomData<&'a RawTable<T, A>>,
3946
}
3947
3948
impl<T, A: Allocator> RawDrain<'_, T, A> {
3949
    #[cfg_attr(feature = "inline-more", inline)]
3950
0
    pub fn iter(&self) -> RawIter<T> {
3951
0
        self.iter.clone()
3952
0
    }
3953
}
3954
3955
unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
3956
where
3957
    T: Send,
3958
    A: Send,
3959
{
3960
}
3961
unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
3962
where
3963
    T: Sync,
3964
    A: Sync,
3965
{
3966
}
3967
3968
impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
3969
    #[cfg_attr(feature = "inline-more", inline)]
3970
0
    fn drop(&mut self) {
3971
0
        unsafe {
3972
0
            // Drop all remaining elements. Note that this may panic.
3973
0
            self.iter.drop_elements();
3974
0
3975
0
            // Reset the contents of the table now that all elements have been
3976
0
            // dropped.
3977
0
            self.table.clear_no_drop();
3978
0
3979
0
            // Move the now empty table back to its original location.
3980
0
            self.orig_table
3981
0
                .as_ptr()
3982
0
                .copy_from_nonoverlapping(&self.table, 1);
3983
0
        }
3984
0
    }
3985
}
3986
3987
impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3988
    type Item = T;
3989
3990
    #[cfg_attr(feature = "inline-more", inline)]
3991
0
    fn next(&mut self) -> Option<T> {
3992
        unsafe {
3993
0
            let item = self.iter.next()?;
3994
0
            Some(item.read())
3995
        }
3996
0
    }
3997
3998
    #[inline]
3999
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4000
0
        self.iter.size_hint()
4001
0
    }
4002
}
4003
4004
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4005
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
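// Editor's usage sketch (not from the hashbrown sources): draining consumes
// every element but, unlike `into_iter`, leaves the now-empty table and its
// storage behind for reuse -- that is what the `orig_table` write-back in
// `Drop` above achieves, even if the drain iterator is leaked.
#[cfg(test)]
#[test]
fn drain_leaves_reusable_table_sketch() {
    let mut table = RawTable::<u64>::new();
    for i in 0..8 {
        table.insert(i, i, |x: &u64| *x);
    }
    let drained: ::alloc::vec::Vec<u64> = table.drain().collect();
    assert_eq!(drained.len(), 8);
    assert_eq!(table.len(), 0);
    assert!(table.capacity() >= 8); // allocation retained
}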
4006
4007
/// Iterator over occupied buckets that could match a given hash.
4008
///
4009
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4010
/// items that have a hash value different than the one provided. You should
4011
/// always validate the returned values before using them.
4012
///
4013
/// For maximum flexibility this iterator is not bound by a lifetime, but you
4014
/// must observe several rules when using it:
4015
/// - You must not free the hash table while iterating (including via growing/shrinking).
4016
/// - It is fine to erase a bucket that has been yielded by the iterator.
4017
/// - Erasing a bucket that has not yet been yielded by the iterator may still
4018
///   result in the iterator yielding that bucket.
4019
/// - It is unspecified whether an element inserted after the iterator was
4020
///   created will be yielded by that iterator.
4021
/// - The order in which the iterator yields buckets is unspecified and may
4022
///   change in the future.
4023
pub struct RawIterHash<T> {
4024
    inner: RawIterHashInner,
4025
    _marker: PhantomData<T>,
4026
}
4027
4028
#[derive(Clone)]
4029
struct RawIterHashInner {
4030
    // See `RawTableInner`'s corresponding fields for details.
4031
    // We can't store a `*const RawTableInner` as it would get
4032
    // invalidated by the user calling `&mut` methods on `RawTable`.
4033
    bucket_mask: usize,
4034
    ctrl: NonNull<u8>,
4035
4036
    // The top 7 bits of the hash.
4037
    tag_hash: Tag,
4038
4039
    // The sequence of groups to probe in the search.
4040
    probe_seq: ProbeSeq,
4041
4042
    group: Group,
4043
4044
    // The elements within the group with a matching tag-hash.
4045
    bitmask: BitMaskIter,
4046
}
4047
4048
impl<T> RawIterHash<T> {
4049
    #[cfg_attr(feature = "inline-more", inline)]
4050
0
    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4051
0
        RawIterHash {
4052
0
            inner: RawIterHashInner::new(&table.table, hash),
4053
0
            _marker: PhantomData,
4054
0
        }
4055
0
    }
4056
}
4057
4058
impl<T> Clone for RawIterHash<T> {
4059
    #[cfg_attr(feature = "inline-more", inline)]
4060
0
    fn clone(&self) -> Self {
4061
0
        Self {
4062
0
            inner: self.inner.clone(),
4063
0
            _marker: PhantomData,
4064
0
        }
4065
0
    }
4066
}
4067
4068
impl<T> Default for RawIterHash<T> {
4069
    #[cfg_attr(feature = "inline-more", inline)]
4070
0
    fn default() -> Self {
4071
0
        Self {
4072
0
            // SAFETY: Because the table is static, it always outlives the iter.
4073
0
            inner: unsafe { RawIterHashInner::new(&RawTableInner::NEW, 0) },
4074
0
            _marker: PhantomData,
4075
0
        }
4076
0
    }
4077
}
4078
4079
impl RawIterHashInner {
4080
    #[cfg_attr(feature = "inline-more", inline)]
4081
0
    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4082
0
        let tag_hash = Tag::full(hash);
4083
0
        let probe_seq = table.probe_seq(hash);
4084
0
        let group = Group::load(table.ctrl(probe_seq.pos));
4085
0
        let bitmask = group.match_tag(tag_hash).into_iter();
4086
0
4087
0
        RawIterHashInner {
4088
0
            bucket_mask: table.bucket_mask,
4089
0
            ctrl: table.ctrl,
4090
0
            tag_hash,
4091
0
            probe_seq,
4092
0
            group,
4093
0
            bitmask,
4094
0
        }
4095
0
    }
4096
}
4097
4098
impl<T> Iterator for RawIterHash<T> {
4099
    type Item = Bucket<T>;
4100
4101
0
    fn next(&mut self) -> Option<Bucket<T>> {
4102
0
        unsafe {
4103
0
            match self.inner.next() {
4104
0
                Some(index) => {
4105
0
                    // Can't use `RawTable::bucket` here as we don't have
4106
0
                    // an actual `RawTable` reference to use.
4107
0
                    debug_assert!(index <= self.inner.bucket_mask);
4108
0
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4109
0
                    Some(bucket)
4110
                }
4111
0
                None => None,
4112
            }
4113
        }
4114
0
    }
4115
}
4116
4117
impl Iterator for RawIterHashInner {
4118
    type Item = usize;
4119
4120
0
    fn next(&mut self) -> Option<Self::Item> {
4121
        unsafe {
4122
            loop {
4123
0
                if let Some(bit) = self.bitmask.next() {
4124
0
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4125
0
                    return Some(index);
4126
0
                }
4127
0
                if likely(self.group.match_empty().any_bit_set()) {
4128
0
                    return None;
4129
0
                }
4130
0
                self.probe_seq.move_next(self.bucket_mask);
4131
0
4132
0
                // Can't use `RawTableInner::ctrl` here as we don't have
4133
0
                // an actual `RawTableInner` reference to use.
4134
0
                let index = self.probe_seq.pos;
4135
0
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4136
0
                let group_ctrl = self.ctrl.as_ptr().add(index).cast();
4137
0
4138
0
                self.group = Group::load(group_ctrl);
4139
0
                self.bitmask = self.group.match_tag(self.tag_hash).into_iter();
4140
            }
4141
        }
4142
0
    }
4143
}
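// Editor's portable sketch of the probe loop above (illustrative: a linear
// group walk and plain byte compares stand in for the triangular probe
// sequence and the SIMD `match_tag`): yield the slots whose control byte
// equals the 7-bit tag, and stop at the first group that still contains an
// empty slot.
#[allow(dead_code)]
fn probe_matches_sketch(ctrl: &[u8], tag: u8, empty: u8) -> ::alloc::vec::Vec<usize> {
    let mut out = ::alloc::vec::Vec::new();
    for (group_index, group) in ctrl.chunks(Group::WIDTH).enumerate() {
        for (bit, &byte) in group.iter().enumerate() {
            if byte == tag {
                out.push(group_index * Group::WIDTH + bit);
            }
        }
        // Like `RawIterHashInner::next`: an empty slot proves the probed-for
        // element cannot live in any later group.
        if group.iter().any(|&byte| byte == empty) {
            break;
        }
    }
    out
}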
4144
4145
pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4146
    pub iter: RawIter<T>,
4147
    pub table: &'a mut RawTable<T, A>,
4148
}
4149
4150
impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4151
    #[cfg_attr(feature = "inline-more", inline)]
4152
0
    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4153
0
    where
4154
0
        F: FnMut(&mut T) -> bool,
4155
0
    {
4156
        unsafe {
4157
0
            for item in &mut self.iter {
4158
0
                if f(item.as_mut()) {
4159
0
                    return Some(self.table.remove(item).0);
4160
0
                }
4161
            }
4162
        }
4163
0
        None
4164
0
    }
4165
}
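// Editor's usage sketch (not from the hashbrown sources): `RawExtractIf`
// backs the map-level `extract_if` iterators; entries the predicate accepts
// are removed and yielded, everything else stays in the table.
#[cfg(test)]
#[test]
fn raw_extract_if_sketch() {
    let mut table = RawTable::<u64>::new();
    for i in 0..8 {
        table.insert(i, i, |x: &u64| *x);
    }
    // SAFETY: the table outlives the iterator within this test.
    let iter = unsafe { table.iter() };
    let mut extract = RawExtractIf {
        iter,
        table: &mut table,
    };
    let mut evens = 0;
    while let Some(v) = extract.next(|v| *v % 2 == 0) {
        assert_eq!(v % 2, 0);
        evens += 1;
    }
    assert_eq!(evens, 4);
    assert_eq!(table.len(), 4);
}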
4166
4167
#[cfg(test)]
4168
mod test_map {
4169
    use super::*;
4170
4171
    #[test]
4172
    fn test_minimum_capacity_for_small_types() {
4173
        #[track_caller]
4174
        fn test_t<T>() {
4175
            let raw_table: RawTable<T> = RawTable::with_capacity(1);
4176
            let actual_buckets = raw_table.buckets();
4177
            let min_buckets = Group::WIDTH / core::mem::size_of::<T>();
4178
            assert!(
4179
                actual_buckets >= min_buckets,
4180
                "expected at least {min_buckets} buckets, got {actual_buckets} buckets"
4181
            );
4182
        }
4183
4184
        test_t::<u8>();
4185
4186
        // This is only "small" for some platforms, like x86_64 with SSE2, but
4187
        // there's no harm in running it on other platforms.
4188
        test_t::<u16>();
4189
    }
4190
4191
    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4192
        unsafe {
4193
            table.table.rehash_in_place(
4194
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4195
                mem::size_of::<T>(),
4196
                if mem::needs_drop::<T>() {
4197
                    Some(|ptr| ptr::drop_in_place(ptr as *mut T))
4198
                } else {
4199
                    None
4200
                },
4201
            );
4202
        }
4203
    }
4204
4205
    #[test]
4206
    fn rehash() {
4207
        let mut table = RawTable::new();
4208
        let hasher = |i: &u64| *i;
4209
        for i in 0..100 {
4210
            table.insert(i, i, hasher);
4211
        }
4212
4213
        for i in 0..100 {
4214
            unsafe {
4215
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4216
            }
4217
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4218
        }
4219
4220
        rehash_in_place(&mut table, hasher);
4221
4222
        for i in 0..100 {
4223
            unsafe {
4224
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4225
            }
4226
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4227
        }
4228
    }
4229
4230
    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4231
    /// AN UNINITIALIZED TABLE DURING THE DROP
4232
    #[test]
4233
    fn test_drop_uninitialized() {
4234
        use ::alloc::vec::Vec;
4235
4236
        let table = unsafe {
4237
            // SAFETY: The `buckets` is a power of two and we're not
4238
            // trying to actually use the returned RawTable.
4239
            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4240
                .unwrap()
4241
        };
4242
        drop(table);
4243
    }
4244
4245
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4246
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4247
    #[test]
4248
    fn test_drop_zero_items() {
4249
        use ::alloc::vec::Vec;
4250
        unsafe {
4251
            // SAFETY: The `buckets` is a power of two and we're not
4252
            // trying to actually use the returned RawTable.
4253
            let mut table =
4254
                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4255
                    .unwrap();
4256
4257
            // WE SIMULATE, AS IT WERE, A FULL TABLE.
4258
4259
            // SAFETY: We checked that the table is allocated and therefore the table already has
4260
            // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
4261
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4262
            table.table.ctrl_slice().fill_empty();
4263
4264
            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4265
            table.table.ctrl(0).write_bytes(0, table.capacity());
4266
4267
            // Fix up the trailing control bytes. See the comments in set_ctrl
4268
            // for the handling of tables smaller than the group width.
4269
            if table.buckets() < Group::WIDTH {
4270
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
4271
                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4272
                // `Group::WIDTH` is safe
4273
                table
4274
                    .table
4275
                    .ctrl(0)
4276
                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4277
            } else {
4278
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
4279
                // control bytes,so copying `Group::WIDTH` bytes with offset equal
4280
                // to `self.buckets() == self.bucket_mask + 1` is safe
4281
                table
4282
                    .table
4283
                    .ctrl(0)
4284
                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4285
            }
4286
            drop(table);
4287
        }
4288
    }
4289
4290
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4291
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4292
    #[test]
4293
    fn test_catch_panic_clone_from() {
4294
        use super::{AllocError, Allocator, Global};
4295
        use ::alloc::sync::Arc;
4296
        use ::alloc::vec::Vec;
4297
        use core::sync::atomic::{AtomicI8, Ordering};
4298
        use std::thread;
4299
4300
        struct MyAllocInner {
4301
            drop_count: Arc<AtomicI8>,
4302
        }
4303
4304
        #[derive(Clone)]
4305
        struct MyAlloc {
4306
            _inner: Arc<MyAllocInner>,
4307
        }
4308
4309
        impl Drop for MyAllocInner {
4310
            fn drop(&mut self) {
4311
                println!("MyAlloc freed.");
4312
                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4313
            }
4314
        }
4315
4316
        unsafe impl Allocator for MyAlloc {
4317
            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4318
                let g = Global;
4319
                g.allocate(layout)
4320
            }
4321
4322
            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4323
                let g = Global;
4324
                g.deallocate(ptr, layout)
4325
            }
4326
        }
4327
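        // `MyAlloc` delegates all allocation to `Global`; its only purpose
        // is to carry an `Arc<MyAllocInner>` so that dropping the last
        // clone of an allocator decrements `drop_count`. A minimal sketch:
        //
        //     let count = Arc::new(AtomicI8::new(1));
        //     let alloc = MyAlloc {
        //         _inner: Arc::new(MyAllocInner { drop_count: count.clone() }),
        //     };
        //     drop(alloc); // last Arc goes away, count drops to 0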
4328
        const DISARMED: bool = false;
4329
        const ARMED: bool = true;
4330
4331
        struct CheckedCloneDrop {
4332
            panic_in_clone: bool,
4333
            dropped: bool,
4334
            need_drop: Vec<u64>,
4335
        }
4336
4337
        impl Clone for CheckedCloneDrop {
4338
            fn clone(&self) -> Self {
4339
                if self.panic_in_clone {
4340
                    panic!("panic in clone")
4341
                }
4342
                Self {
4343
                    panic_in_clone: self.panic_in_clone,
4344
                    dropped: self.dropped,
4345
                    need_drop: self.need_drop.clone(),
4346
                }
4347
            }
4348
        }
4349
4350
        impl Drop for CheckedCloneDrop {
4351
            fn drop(&mut self) {
4352
                if self.dropped {
4353
                    panic!("double drop");
4354
                }
4355
                self.dropped = true;
4356
            }
4357
        }
4358
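        // `CheckedCloneDrop` is instrumented in both directions: `clone`
        // panics when `panic_in_clone` is set, and `drop` panics if it runs
        // twice, so a `clone_from` that double-drops an element fails the
        // test loudly instead of silently corrupting memory.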
4359
        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4360
4361
        let mut table = RawTable::new_in(MyAlloc {
4362
            _inner: Arc::new(MyAllocInner {
4363
                drop_count: dropped.clone(),
4364
            }),
4365
        });
4366
4367
        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4368
            let idx = idx as u64;
4369
            table.insert(
4370
                idx,
4371
                (
4372
                    idx,
4373
                    CheckedCloneDrop {
4374
                        panic_in_clone,
4375
                        dropped: false,
4376
                        need_drop: vec![idx],
4377
                    },
4378
                ),
4379
                |(k, _)| *k,
4380
            );
4381
        }
4382
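        // `RawTable::insert` takes a precomputed hash, the value, and a
        // hasher closure used to re-hash existing entries if the insert
        // grows the table; here each key doubles as its own hash. The same
        // call with the hash made explicit (`value` stands in for the
        // `CheckedCloneDrop` payload above):
        //
        //     let hash: u64 = idx; // identity "hash" keeps probing simple
        //     table.insert(hash, (idx, value), |(k, _)| *k);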
4383
        assert_eq!(table.len(), 7);
4384
4385
        thread::scope(|s| {
4386
            let result = s.spawn(|| {
4387
                let armed_flags = [
4388
                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4389
                ];
4390
                let mut scope_table = RawTable::new_in(MyAlloc {
4391
                    _inner: Arc::new(MyAllocInner {
4392
                        drop_count: dropped.clone(),
4393
                    }),
4394
                });
4395
                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4396
                    let idx = idx as u64;
4397
                    scope_table.insert(
4398
                        idx,
4399
                        (
4400
                            idx,
4401
                            CheckedCloneDrop {
4402
                                panic_in_clone,
4403
                                dropped: false,
4404
                                need_drop: vec![idx + 100],
4405
                            },
4406
                        ),
4407
                        |(k, _)| *k,
4408
                    );
4409
                }
4410
                table.clone_from(&scope_table);
4411
            });
4412
            assert!(result.join().is_err());
4413
        });
4414
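        // The element whose flag is ARMED panics while being cloned inside
        // `clone_from`, and `thread::scope` surfaces that panic as the
        // `Err` checked above. `clone_from` is expected to unwind cleanly,
        // leaving `table` empty; the assertions below verify exactly that.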
4415
        // Let's check that all iterators work fine and do not return elements
4416
        // (especially `RawIterRange`, which does not depend on the number of
4417
        // elements in the table, but looks directly at the control bytes)
4418
        //
4419
        // SAFETY: We know for sure that `RawTable` will outlive
4420
        // the returned `RawIter / RawIterRange` iterator.
4421
        assert_eq!(table.len(), 0);
4422
        assert_eq!(unsafe { table.iter().count() }, 0);
4423
        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4424
4425
        for idx in 0..table.buckets() {
4426
            let idx = idx as u64;
4427
            assert!(
4428
                table.find(idx, |(k, _)| *k == idx).is_none(),
4429
                "Index: {idx}"
4430
            );
4431
        }
4432
4433
        // Only `table`'s own allocator is still alive; every other clone must already be dropped.
4434
        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4435
    }
4436
}