Coverage Report

Created: 2024-08-22 06:13

/rust/registry/src/index.crates.io-6f17d22bba15001f/hashbrown-0.14.5/src/raw/mod.rs
Every line below had an execution count of 0, and every generic instantiation listed by the report was unexecuted.
use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::{guard, ScopeGuard};
use crate::TryReserveError;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::{hint, ptr};

cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri),
    ))] {
        mod sse2;
        use sse2 as imp;
    } else if #[cfg(all(
        target_arch = "aarch64",
        target_feature = "neon",
        // NEON intrinsics are currently broken on big-endian targets.
        // See https://github.com/rust-lang/stdarch/issues/1484.
        target_endian = "little",
        not(miri),
    ))] {
        mod neon;
        use neon as imp;
    } else {
        mod generic;
        use generic as imp;
    }
}

mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};

mod bitmask;

use self::bitmask::BitMaskIter;
use self::imp::Group;

// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(not(feature = "nightly"))]
use core::convert::identity as likely;
#[cfg(not(feature = "nightly"))]
use core::convert::identity as unlikely;
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};

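// Usage sketch (illustrative, not part of the original source): because the
// stable fallback is plain `identity`, call sites can wrap conditions in the
// hints unconditionally, e.g. `if unlikely(additional > growth_left) { ... }`;
// on nightly this informs the optimizer, on stable it compiles away.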
// FIXME: use strict provenance functions once they are stable.
// Implement it with a transmute for now.
#[inline(always)]
#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here
fn invalid_mut<T>(addr: usize) -> *mut T {
    unsafe { core::mem::transmute(addr) }
}

#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}

/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}

trait SizedTypeProperties: Sized {
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}

impl<T> SizedTypeProperties for T {}

/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
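// Worked example (illustrative, not part of the original source): how the
// predicates above classify the three control-byte states.
#[cfg(test)]
mod ctrl_byte_demo {
    use super::{is_full, is_special, special_is_empty, DELETED, EMPTY};

    #[test]
    fn classify() {
        assert!(is_full(0x4a)); // top bit clear: full, low 7 bits hold h2
        assert!(is_special(EMPTY) && special_is_empty(EMPTY)); // 0b1111_1111
        assert!(is_special(DELETED) && !special_is_empty(DELETED)); // 0b1000_0000
    }
}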

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

// Constant for the h2 function that grabs the top 7 bits of the hash.
const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
    mem::size_of::<usize>()
} else {
    mem::size_of::<u64>()
};

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    // So we use the MIN_HASH_LEN constant to handle this.
    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
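// Worked example (illustrative, not part of the original source): splitting a
// single 64-bit hash between the two functions above. Assumes a 64-bit target,
// where MIN_HASH_LEN == 8.
#[cfg(all(test, target_pointer_width = "64"))]
mod hash_split_demo {
    use super::{h1, h2};

    #[test]
    fn split() {
        let hash: u64 = 0xabcd_ef01_2345_6789;
        // h1 keeps the full hash; the table later masks it to a bucket index.
        assert_eq!(h1(hash), hash as usize);
        // h2 keeps the top 7 bits for the control byte: 0b101_0101 == 0x55.
        assert_eq!(h2(hash), 0x55);
    }
}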

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}
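// Illustrative check (not part of the original source) of the coverage
// property documented above, modeled in units of whole groups: with a
// power-of-two group count, the triangular stride visits every group once.
#[cfg(test)]
mod probe_seq_demo {
    #[test]
    fn visits_every_group_once() {
        const GROUPS: usize = 16; // must be a power of two
        let mask = GROUPS - 1;
        let (mut pos, mut stride) = (0usize, 0usize);
        let mut seen = [false; GROUPS];
        for _ in 0..GROUPS {
            assert!(!seen[pos], "group visited twice");
            seen[pos] = true;
            stride += 1; // jump by one more group each time
            pos = (pos + stride) & mask;
        }
        assert!(seen.iter().all(|&v| v));
    }
}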

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    if cap < 8 {
        // We don't bother with a table size of 2 buckets since that can only
        // hold a single element. Instead we skip directly to a 4 bucket table
        // which can hold 3 elements.
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
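// Worked examples (illustrative, not part of the original source): the
// small-table path and the 87.5%-load path of the function above.
#[cfg(test)]
mod capacity_to_buckets_demo {
    use super::capacity_to_buckets;

    #[test]
    fn examples() {
        // cap < 4 skips straight to a 4-bucket table (which holds 3 elements).
        assert_eq!(capacity_to_buckets(3), Some(4));
        // cap = 20: 20 * 8 / 7 = 22, rounded up to the next power of two.
        assert_eq!(capacity_to_buckets(20), Some(32));
    }
}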

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
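// Worked examples (illustrative, not part of the original source): the inverse
// direction, from bucket mask back to effective capacity.
#[cfg(test)]
mod bucket_mask_to_capacity_demo {
    use super::bucket_mask_to_capacity;

    #[test]
    fn examples() {
        // 4 buckets (mask 3): one slot is always kept empty.
        assert_eq!(bucket_mask_to_capacity(3), 3);
        // 32 buckets (mask 31): 87.5% load factor, 32 / 8 * 7 = 28.
        assert_eq!(bucket_mask_to_capacity(31), 28);
    }
}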

/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
/// while keeping the rest of `calculate_layout_for` independent of `T`
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    const fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: if layout.align() > Group::WIDTH {
                layout.align()
            } else {
                Group::WIDTH
            },
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        // We need an additional check to ensure that the allocation doesn't
        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
        if len > isize::MAX as usize - (ctrl_align - 1) {
            return None;
        }

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
}
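// Worked example (illustrative, not part of the original source), assuming
// Group::WIDTH == 16 as with SSE2, for T = u64 (size 8, align 8) and 8 buckets:
//
//     ctrl_align  = max(8, 16)            = 16
//     ctrl_offset = round_up(8 * 8, 16)   = 64   (data occupies bytes 0..64)
//     len         = 64 + 8 + Group::WIDTH = 88   (buckets + trailing group)
//
// i.e. one 88-byte allocation, 16-aligned, with control bytes starting at
// offset 64.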

/// A reference to an empty bucket into which an element can be inserted.
pub struct InsertSlot {
    index: usize,
}

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually this is a pointer to the next element rather than the element
    // itself, which is needed to maintain pointer arithmetic invariants;
    // keeping a direct pointer to the element introduces difficulty.
    // Using `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[inline]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    /// Creates a [`Bucket`] that contains a pointer to the data.
    /// The pointer calculation is performed by calculating the
    /// offset from the given `base` pointer (convenience for
    /// `base.as_ptr().sub(index)`).
    ///
    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// If the `T` is a ZST, then we instead track the index of the element
    /// in the table so that `erase` works properly (return
    /// `NonNull::new_unchecked((index + 1) as *mut T)`)
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of the [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the `base` pointer must not be `dangling` and must point to the
    ///   end of the first `value element` from the `data part` of the table, i.e.
    ///   must be the pointer that is returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    ///   must be no greater than the number returned by the function
    ///   [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
    /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
    /// must be no greater than the number returned by the function
    /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
        // the data part of the table (we start counting from "0", so that
        // in the expression T[last], the "last" index is actually one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
        //
        //                   `from_base_index(base, 1).as_ptr()` returns a pointer that
        //                   points here in the data part of the table
        //                   (to the start of T1)
        //                        |
        //                        |        `base: NonNull<T>` must point here
        //                        |         (to the end of T0 or to the start of C0)
        //                        v         v
        // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
        //                           ^
        //                           `from_base_index(base, 1)` returns a pointer
        //                           that points here in the data part of the table
        //                           (to the end of T1)
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes
        // or metadata for data.
        let ptr = if T::IS_ZERO_SIZED {
            // won't overflow because index must be less than length (bucket_mask)
            // and bucket_mask is guaranteed to be less than `isize::MAX`
            // (see TableLayout::calculate_layout_for method)
            invalid_mut(index + 1)
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
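    // Illustrative sketch (not part of the original source): the same
    // "count back from one-past-the-end" arithmetic on a plain array, since
    // buckets are stored below `base` in reverse index order:
    //
    //     let data = [10u32, 20, 30, 40];
    //     unsafe {
    //         let end = data.as_ptr().add(data.len()); // plays the role of `base`
    //         assert_eq!(*end.sub(1), 40);             // like from_base_index(base, 1)
    //         assert_eq!(end.offset_from(end.sub(1)), 1); // inverse: recovers index 1
    //     }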

    /// Calculates the index of a [`Bucket`] as the distance between two pointers
    /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
    /// The returned value is in units of T: the distance in bytes divided by
    /// [`core::mem::size_of::<T>()`].
    ///
    /// If the `T` is a ZST, then we return the index of the element in
    /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
    ///
    /// This function is the inverse of [`from_base_index`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*const T>::offset_from`] method of `*const T`.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*const T>::offset_from`]
    /// method, as well as for the correct logic of the work of this crate, the
    /// following rules are necessary and sufficient:
    ///
    /// * the pointer contained in `base` must not be `dangling` and must point to the
    ///   end of the first `element` from the `data part` of the table, i.e.
    ///   must be a pointer returned by [`RawTable::data_end`] or by
    ///   [`RawTableInner::data_end<T>`];
    ///
    /// * `self` also must not contain a dangling pointer;
    ///
    /// * both `self` and `base` must be created from the same [`RawTable`]
    ///   (or [`RawTableInner`]).
    ///
    /// If `mem::size_of::<T>() == 0`, this function is always safe.
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`from_base_index`]: crate::raw::Bucket::from_base_index
    /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
    /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTableInner`]: RawTableInner
    /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
    #[inline]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        // If mem::size_of::<T>() != 0 then return an index under which we used to store the
        // `element` in the data part of the table (we start counting from "0", so
        // that in the expression T[last], the "last" index actually is one less than the
        // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
        // For example, for the 5th element in the table the calculation is performed like this:
        //
        //                        mem::size_of::<T>()
        //                          |
        //                          |         `self = from_base_index(base, 5)` that returns a pointer
        //                          |         that points here in the data part of the table
        //                          |         (to the end of T5)
        //                          |           |                    `base: NonNull<T>` must point here
        //                          v           |                    (to the end of T0 or to the start of C0)
        //                        /???\         v                      v
        // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
        //                                      \__________  __________/
        //                                                 \/
        //                                     `bucket.to_base_index(base)` = 5
        //                                     (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
        //
        // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
        if T::IS_ZERO_SIZED {
            // this can not be UB
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }

    /// Acquires the underlying raw pointer `*mut T` to `data`.
    ///
    /// # Note
    ///
    /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
    /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
    /// for properly dropping the data we also need to clear the `data` control bytes. If we
    /// drop the data but do not clear the `data` control byte, it leads to a double drop when
    /// [`RawTable`] goes out of scope.
    ///
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
    /// will not re-evaluate where the new value should go, meaning the value may become
    /// "lost" if its location does not reflect its state.
    ///
    /// [`RawTable`]: crate::raw::RawTable
    /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "raw")]
    /// # fn test() {
    /// use core::hash::{BuildHasher, Hash};
    /// use hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value = ("a", 100);
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
    ///
    /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap();
    ///
    /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
    /// # }
    /// # fn main() {
    /// #     #[cfg(feature = "raw")]
    /// #     test()
    /// # }
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        if T::IS_ZERO_SIZED {
            // Just return an arbitrary ZST pointer which is properly aligned;
            // an invalid pointer is good enough for a ZST.
            invalid_mut(mem::align_of::<T>())
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }

    /// Creates a new [`Bucket`] that is offset from `self` by the given
    /// `offset`. The pointer calculation is performed by calculating the
    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
    /// This function is used for iterators.
    ///
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
    /// from the safety rules for the [`<*mut T>::sub`] method of `*mut T` and the safety
    /// rules of the [`NonNull::new_unchecked`] function.
    ///
    /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
    /// and [`NonNull::new_unchecked`] function, as well as for the correct
    /// logic of the work of this crate, the following rules are necessary and
    /// sufficient:
    ///
    /// * the pointer contained in `self` must not be `dangling`;
    ///
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
    ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
    /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
    ///
    /// [`Bucket`]: crate::raw::Bucket
    /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
    /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
    /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
    #[inline]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if T::IS_ZERO_SIZED {
            // invalid pointer is good enough for ZST
            invalid_mut(self.ptr.as_ptr() as usize + offset)
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    /// Executes the destructor (if any) of the pointed-to `data`.
    ///
    /// # Safety
    ///
    /// See [`ptr::drop_in_place`] for safety concerns.
    ///
    /// You should use [`RawTable::erase`] instead of this function,
    /// or be careful when calling this function directly, because for
    /// properly dropping the data we also need to clear the `data` control bytes.
    /// If we drop the data but do not erase the `data` control byte, it leads to
    /// a double drop when [`RawTable`] goes out of scope.
    ///
    /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::erase`]: crate::raw::RawTable::erase
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    /// Reads the `value` from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// See [`ptr::read`] for safety concerns.
    ///
    /// You should use [`RawTable::remove`] instead of this function,
    /// or be careful when calling this function directly, because the compiler
    /// calls the destructor when the read `value` goes out of scope. This
    /// can cause a double drop when [`RawTable`] goes out of scope,
    /// because the `data` control byte has not been erased.
    ///
    /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [`RawTable`]: crate::raw::RawTable
    /// [`RawTable::remove`]: crate::raw::RawTable::remove
    #[inline]
    pub(crate) unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    /// Overwrites a memory location with the given `value` without reading
    /// or dropping the old value (like the [`ptr::write`] function).
    ///
    /// # Safety
    ///
    /// See [`ptr::write`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[inline]
    pub(crate) unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    /// Returns a shared immutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_ref`] for safety concerns.
    ///
    /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "raw")]
    /// # fn test() {
    /// use core::hash::{BuildHasher, Hash};
    /// use hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
    ///
    /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
    ///
    /// assert_eq!(
    ///     unsafe { bucket.as_ref() },
    ///     &("A pony", "is a small horse".to_owned())
    /// );
    /// # }
    /// # fn main() {
    /// #     #[cfg(feature = "raw")]
    /// #     test()
    /// # }
    /// ```
    #[inline]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    /// Returns a unique mutable reference to the `value`.
    ///
    /// # Safety
    ///
    /// See [`NonNull::as_mut`] for safety concerns.
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "raw")]
    /// # fn test() {
    /// use core::hash::{BuildHasher, Hash};
    /// use hashbrown::raw::{Bucket, RawTable};
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
    ///
    /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
    ///
    /// unsafe {
    ///     bucket
    ///         .as_mut()
    ///         .1
    ///         .push_str(" less than 147 cm at the withers")
    /// };
    /// assert_eq!(
    ///     unsafe { bucket.as_ref() },
    ///     &(
    ///         "A pony",
    ///         "is a small horse less than 147 cm at the withers".to_owned()
    ///     )
    /// );
    /// # }
    /// # fn main() {
    /// #     #[cfg(feature = "raw")]
    /// #     test()
    /// # }
    /// ```
    #[inline]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }

    /// Copies `size_of::<T>()` bytes from `other` to `self`. The source
    /// and destination may *not* overlap.
    ///
    /// # Safety
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns.
    ///
    /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
    /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
    /// in the region beginning at `*self` and the region beginning at `*other` can
    /// [violate memory safety].
    ///
    /// # Note
    ///
    /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
    /// those for the old `T` value, as the map will not re-evaluate where the new
    /// value should go, meaning the value may become "lost" if its location
    /// does not reflect its state.
    ///
    /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
    /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
    /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
    /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
    /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
    #[cfg(feature = "raw")]
    #[inline]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}

/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator = Global> {
    table: RawTableInner,
    alloc: A,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,
}
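// Worked example (illustrative, not part of the original source): because the
// bucket count is always a power of two, `bucket_mask = buckets - 1` turns the
// reduction `h1(hash) % buckets` into a cheap bitwise AND.
#[cfg(test)]
mod bucket_mask_demo {
    #[test]
    fn mask_equals_modulo() {
        let buckets = 16usize;
        let bucket_mask = buckets - 1;
        let h1 = 0x9e37_79b9usize;
        assert_eq!(h1 & bucket_mask, h1 % buckets);
    }
}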

impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc: Global,
            marker: PhantomData,
        }
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::try_with_capacity_in(capacity, Global)
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}
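// Usage sketch (illustrative, not part of the original source; assumes the
// public `capacity` accessor on `RawTable`): the constructors above in action.
#[cfg(test)]
mod constructor_demo {
    use super::RawTable;

    #[test]
    fn new_and_with_capacity() {
        // `new` performs no allocation, so the effective capacity is zero.
        let empty: RawTable<u32> = RawTable::new();
        assert_eq!(empty.capacity(), 0);
        // `with_capacity` guarantees room for at least `capacity` elements.
        let sized: RawTable<u32> = RawTable::with_capacity(10);
        assert!(sized.capacity() >= 10);
    }
}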

impl<T, A: Allocator> RawTable<T, A> {
    const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();

    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[inline]
    pub const fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::NEW,
            alloc,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(
                &alloc,
                Self::TABLE_LAYOUT,
                buckets,
                fallibility,
            )?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table using the given allocator, with at least enough
    /// capacity for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Ok(Self {
            table: RawTableInner::fallible_with_capacity(
                &alloc,
                Self::TABLE_LAYOUT,
                capacity,
                Fallibility::Fallible,
            )?,
            alloc,
            marker: PhantomData,
        })
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self {
            table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
            alloc,
            marker: PhantomData,
        }
    }

    /// Returns a reference to the underlying allocator.
    #[inline]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns a pointer to one past the last `data` element in the table, as viewed from
    /// the start point of the allocation.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub fn data_end(&self) -> NonNull<T> {
        //                        `self.table.ctrl.cast()` returns a pointer that
        //                        points here (to the end of `T0`)
        //                          ∨
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
        //                           \________  ________/
        //                                    \/
        //       `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`.
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
        //                        with loading `Group` bytes from the heap works properly, even if the result
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
        //                        `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
        self.table.ctrl.cast()
    }

    /// Returns a pointer to the start of the data table.
    #[inline]
    #[cfg(any(feature = "raw", feature = "nightly"))]
    pub unsafe fn data_start(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
    }

    /// Returns information about the memory allocated by the table.
    ///
    /// `RawTable` allocates a single memory block to store both data and metadata.
    /// This function returns the allocation's size and alignment and the beginning of the area.
    /// These are the arguments which will be passed to `dealloc` when the table is dropped.
    ///
    /// This function might be useful for memory profiling.
    #[inline]
    #[cfg(feature = "raw")]
    pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
        // SAFETY: We use the same `table_layout` that was used to allocate
        // this table.
        unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[inline]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    ///
    /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
    /// otherwise using it may result in [`undefined behavior`].
    ///
    /// # Safety
    ///
    /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
    /// following safety rules:
    ///
    /// * The table must already be allocated;
    ///
    /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
    ///   function, i.e. `(index + 1) <= self.buckets()`.
    ///
    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
    ///
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
    /// `(index + 1) <= self.buckets()`.
    ///
    /// [`RawTable::buckets`]: RawTable::buckets
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
    #[inline]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
        // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
        //
        //           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
        //           part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
        //                  |
        //                  |               `base = self.data_end()` points here
        //                  |               (to the start of CT0 or to the end of T0)
        //                  v                 v
        // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
        //                     ^                                              \__________  __________/
        //        `table.bucket(3)` returns a pointer that points                        \/
        //         here in the `data` part of the `RawTable` (to              additional control bytes
        //         the end of T3)                                              `m = Group::WIDTH - 1`
        //
        // where: T0...T_n  - our stored data;
        //        CT0...CT_n - control bytes or metadata for `data`;
        //        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
        //                        the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
        //                        is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
        //
        // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
        // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
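    // Usage sketch (illustrative, not part of the original source):
    // `bucket_index` inverts `bucket`, so a bucket can be round-tripped
    // through its index (the constant hash below is for demonstration only):
    //
    //     let mut table: RawTable<u64> = RawTable::with_capacity(4);
    //     let bucket = table.insert(1, 42, |_| 1);
    //     unsafe {
    //         let index = table.bucket_index(&bucket);
    //         assert_eq!(table.bucket(index).as_ref(), &42);
    //     }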

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index);
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Finds and erases an element from the table, dropping it in place.
    /// Returns true if an element was found.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
        // Avoid `Option::map` because it bloats LLVM IR.
        if let Some(bucket) = self.find(hash, eq) {
            unsafe {
                self.erase(bucket);
            }
            true
        } else {
            false
        }
    }

    /// Removes an element from the table, returning it.
    ///
    /// This also returns an `InsertSlot` pointing to the newly free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
        self.erase_no_drop(&item);
        (
            item.read(),
            InsertSlot {
                index: self.bucket_index(&item),
            },
        )
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
            None => None,
        }
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop();
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        if self.is_empty() {
            // Special case empty table to avoid surprising O(capacity) time.
            return;
        }
        // Ensure that the table is reset even if one of the drops panic
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            // SAFETY: ScopeGuard sets to zero the `items` field of the table
            // even in case of panic during the dropping of the elements so
            // that there will be no double drop of the elements.
            self_.table.drop_elements::<T>();
        }
    }
1104
1105
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
1106
    #[cfg_attr(feature = "inline-more", inline)]
1107
0
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
1108
0
        // Calculate the minimal number of elements that we need to reserve
1109
0
        // space for.
1110
0
        let min_size = usize::max(self.table.items, min_size);
1111
0
        if min_size == 0 {
1112
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
1113
0
            unsafe {
1114
0
                // SAFETY:
1115
0
                // 1. We call the function only once;
1116
0
                // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
1117
0
                //    and [`TableLayout`] that were used to allocate this table.
1118
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
1119
0
                //    because we have replaced the inner table with a new one.
1120
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
1121
0
            }
1122
0
            return;
1123
0
        }
1124
1125
        // Calculate the number of buckets that we need for this number of
1126
        // elements. If the calculation overflows then the requested bucket
1127
        // count must be larger than what we have right and nothing needs to be
1128
        // done.
1129
0
        let min_buckets = match capacity_to_buckets(min_size) {
1130
0
            Some(buckets) => buckets,
1131
0
            None => return,
1132
        };
1133
1134
        // If we have more buckets than we need, shrink the table.
1135
0
        if min_buckets < self.buckets() {
1136
            // Fast path if the table is empty
1137
0
            if self.table.items == 0 {
1138
0
                let new_inner =
1139
0
                    RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
1140
0
                let mut old_inner = mem::replace(&mut self.table, new_inner);
1141
0
                unsafe {
1142
0
                    // SAFETY:
1143
0
                    // 1. We call the function only once;
1144
0
                    // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
1145
0
                    //    and [`TableLayout`] that were used to allocate this table.
1146
0
                    // 3. If any elements' drop function panics, then there will only be a memory leak,
1147
0
                    //    because we have replaced the inner table with a new one.
1148
0
                    old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
1149
0
                }
1150
            } else {
1151
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1152
                unsafe {
1153
                    // SAFETY:
1154
                    // 1. We know for sure that `min_size >= self.table.items`.
1155
                    // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1156
                    //    we will never expose RawTable::new_uninitialized in a public API.
1157
0
                    if self
1158
0
                        .resize(min_size, hasher, Fallibility::Infallible)
1159
0
                        .is_err()
1160
                    {
1161
                        // SAFETY: The result of calling the `resize` function cannot be an error
1162
                        // because `fallibility == Fallibility::Infallible.
1163
0
                        hint::unreachable_unchecked()
1164
0
                    }
1165
                }
1166
            }
1167
0
        }
1168
0
    }
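A sketch of the intended shrink flow (illustrative only; `raw` feature assumed): over-reserve, then shrink back down to a floor of `min_size` elements:

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |v: &u64| s.hash_one(*v);
    let mut t: RawTable<u64> = RawTable::new();
    t.reserve(1_000, hasher);
    t.insert(s.hash_one(1u64), 1, hasher);
    assert!(t.capacity() >= 1_000);

    // Shrinks to fit `max(self.len(), min_size)` = max(1, 8) = 8 elements.
    t.shrink_to(8, hasher);
    assert!(t.capacity() >= 8 && t.capacity() < 1_000);
}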
1169
1170
    /// Ensures that at least `additional` items can be inserted into the table
1171
    /// without reallocation.
1172
    #[cfg_attr(feature = "inline-more", inline)]
1173
0
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
1174
0
        if unlikely(additional > self.table.growth_left) {
1175
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1176
            unsafe {
1177
                // SAFETY: The [`RawTableInner`] must already have properly initialized control
1178
                // bytes since we will never expose RawTable::new_uninitialized in a public API.
1179
0
                if self
1180
0
                    .reserve_rehash(additional, hasher, Fallibility::Infallible)
1181
0
                    .is_err()
1182
                {
1183
                    // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
1184
0
                    hint::unreachable_unchecked()
1185
0
                }
1186
            }
1187
0
        }
1188
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::reserve::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::reserve::<hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>
1189
1190
    /// Tries to ensure that at least `additional` items can be inserted into
1191
    /// the table without reallocation.
1192
    #[cfg_attr(feature = "inline-more", inline)]
1193
0
    pub fn try_reserve(
1194
0
        &mut self,
1195
0
        additional: usize,
1196
0
        hasher: impl Fn(&T) -> u64,
1197
0
    ) -> Result<(), TryReserveError> {
1198
0
        if additional > self.table.growth_left {
1199
            // SAFETY: The [`RawTableInner`] must already have properly initialized control
1200
            // bytes since we will never expose RawTable::new_uninitialized in a public API.
1201
0
            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
1202
        } else {
1203
0
            Ok(())
1204
        }
1205
0
    }
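A sketch contrasting the infallible and fallible reservation paths (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;
use hashbrown::TryReserveError;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |v: &u32| s.hash_one(*v);
    let mut t: RawTable<u32> = RawTable::new();

    // `reserve` panics (or aborts) on failure; `try_reserve` surfaces the
    // error to the caller instead.
    match t.try_reserve(64, hasher) {
        Ok(()) => assert!(t.capacity() >= 64),
        Err(TryReserveError::CapacityOverflow) => unreachable!("64 cannot overflow"),
        Err(e) => eprintln!("allocation failed: {e:?}"),
    }
}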
1206
1207
    /// Out-of-line slow path for `reserve` and `try_reserve`.
1208
    ///
1209
    /// # Safety
1210
    ///
1211
    /// The [`RawTableInner`] must have properly initialized control bytes,
1212
    /// otherwise calling this function results in [`undefined behavior`]
1213
    ///
1214
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1215
    #[cold]
1216
    #[inline(never)]
1217
0
    unsafe fn reserve_rehash(
1218
0
        &mut self,
1219
0
        additional: usize,
1220
0
        hasher: impl Fn(&T) -> u64,
1221
0
        fallibility: Fallibility,
1222
0
    ) -> Result<(), TryReserveError> {
1223
0
        unsafe {
1224
0
            // SAFETY:
1225
0
            // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1226
0
            //    [`TableLayout`] that were used to allocate this table.
1227
0
            // 2. The `drop` function is the actual drop function of the elements stored in
1228
0
            //    the table.
1229
0
            // 3. The caller ensures that the control bytes of the `RawTableInner`
1230
0
            //    are already initialized.
1231
0
            self.table.reserve_rehash_inner(
1232
0
                &self.alloc,
1233
0
                additional,
1234
0
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::reserve_rehash::<_>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::reserve_rehash::<hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>::{closure#0}
1235
0
                fallibility,
1236
0
                Self::TABLE_LAYOUT,
1237
0
                if T::NEEDS_DROP {
1238
0
                    Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
1239
                } else {
1240
0
                    None
1241
                },
1242
            )
1243
        }
1244
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::reserve_rehash::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::reserve_rehash::<hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>
1245
1246
    /// Allocates a new table of a different size and moves the contents of the
1247
    /// current table into it.
1248
    ///
1249
    /// # Safety
1250
    ///
1251
    /// The [`RawTableInner`] must have properly initialized control bytes,
1252
    /// otherwise calling this function results in [`undefined behavior`]
1253
    ///
1254
    /// The caller of this function must ensure that `capacity >= self.table.items`
1255
    /// otherwise:
1256
    ///
1257
    /// * If `self.table.items != 0`, calling of this function with `capacity`
1258
    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
1259
    ///
1260
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
1261
    ///   `self.table.items > capacity_to_buckets(capacity)`
1262
    ///   calling this function results in [`undefined behavior`].
1263
    ///
1264
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
1265
    ///   `self.table.items > capacity_to_buckets(capacity)`
1266
    ///   calling this function will never return (it will go into an
1267
    ///   infinite loop).
1268
    ///
1269
    /// See [`RawTableInner::find_insert_slot`] for more information.
1270
    ///
1271
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1272
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1273
0
    unsafe fn resize(
1274
0
        &mut self,
1275
0
        capacity: usize,
1276
0
        hasher: impl Fn(&T) -> u64,
1277
0
        fallibility: Fallibility,
1278
0
    ) -> Result<(), TryReserveError> {
1279
0
        // SAFETY:
1280
0
        // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1281
0
        // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1282
0
        //    [`TableLayout`] that were used to allocate this table.
1283
0
        // 3. The caller ensures that the control bytes of the `RawTableInner`
1284
0
        //    are already initialized.
1285
0
        self.table.resize_inner(
1286
0
            &self.alloc,
1287
0
            capacity,
1288
0
            &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1289
0
            fallibility,
1290
0
            Self::TABLE_LAYOUT,
1291
0
        )
1292
0
    }
1293
1294
    /// Inserts a new element into the table, and returns its raw bucket.
1295
    ///
1296
    /// This does not check if the given element already exists in the table.
1297
    #[cfg_attr(feature = "inline-more", inline)]
1298
0
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1299
0
        unsafe {
1300
0
            // SAFETY:
1301
0
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1302
0
            //    we will never expose `RawTable::new_uninitialized` in a public API.
1303
0
            //
1304
0
            // 2. We reserve additional space (if necessary) right after calling this function.
1305
0
            let mut slot = self.table.find_insert_slot(hash);
1306
0
1307
0
            // We can avoid growing the table once we have reached our load factor if we are replacing
1308
0
            // a tombstone. This works since the number of EMPTY slots does not change in this case.
1309
0
            //
1310
0
            // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
1311
0
            // in the range `0..=self.buckets()`.
1312
0
            let old_ctrl = *self.table.ctrl(slot.index);
1313
0
            if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
1314
0
                self.reserve(1, hasher);
1315
0
                // SAFETY: We know for sure that `RawTableInner` has control bytes
1316
0
                // initialized and that there is extra space in the table.
1317
0
                slot = self.table.find_insert_slot(hash);
1318
0
            }
1319
1320
0
            self.insert_in_slot(hash, slot, value)
1321
0
        }
1322
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::insert::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::insert::<hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>
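A sketch of a raw insertion (illustrative; `raw` feature assumed). Note that `insert` never checks for duplicates, so deduplication is the caller's job:

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |kv: &(u32, &str)| s.hash_one(kv.0);
    let mut t: RawTable<(u32, &str)> = RawTable::new();

    // The caller supplies the hash up front; the `hasher` closure is only
    // invoked to re-hash existing elements when the insert forces a grow.
    let bucket = t.insert(s.hash_one(1u32), (1, "one"), hasher);
    // SAFETY: the bucket is live and the table has not been mutated since.
    unsafe { assert_eq!(bucket.as_ref().1, "one") }
    assert_eq!(t.len(), 1);
}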
1323
1324
    /// Attempts to insert a new element without growing the table and return its raw bucket.
1325
    ///
1326
    /// Returns an `Err` containing the given element if inserting it would require growing the
1327
    /// table.
1328
    ///
1329
    /// This does not check if the given element already exists in the table.
1330
    #[cfg(feature = "raw")]
1331
    #[cfg_attr(feature = "inline-more", inline)]
1332
0
    pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
1333
0
        unsafe {
1334
0
            match self.table.prepare_insert_no_grow(hash) {
1335
0
                Ok(index) => {
1336
0
                    let bucket = self.bucket(index);
1337
0
                    bucket.write(value);
1338
0
                    Ok(bucket)
1339
                }
1340
0
                Err(()) => Err(value),
1341
            }
1342
        }
1343
0
    }
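A sketch of the no-grow insertion path (illustrative; `raw` feature assumed). That a fresh, unallocated table always rejects the insert is an inference from the code above, not a documented guarantee:

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |v: &u32| s.hash_one(*v);
    let mut t: RawTable<u32> = RawTable::new();
    let hash = s.hash_one(5u32);

    // A fresh table has `growth_left == 0`, so the value comes back in `Err`.
    match t.try_insert_no_grow(hash, 5) {
        Err(v) => assert_eq!(v, 5),
        Ok(_) => unreachable!(),
    }

    t.reserve(1, hasher);
    assert!(t.try_insert_no_grow(hash, 5).is_ok());
}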
1344
1345
    /// Inserts a new element into the table, and returns a mutable reference to it.
1346
    ///
1347
    /// This does not check if the given element already exists in the table.
1348
    #[cfg_attr(feature = "inline-more", inline)]
1349
0
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1350
0
        unsafe { self.insert(hash, value, hasher).as_mut() }
1351
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::insert_entry::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::insert_entry::<hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>
1352
1353
    /// Inserts a new element into the table, without growing the table.
1354
    ///
1355
    /// There must be enough space in the table to insert the new element.
1356
    ///
1357
    /// This does not check if the given element already exists in the table.
1358
    #[cfg_attr(feature = "inline-more", inline)]
1359
    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
1360
0
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1361
0
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1362
0
        let bucket = self.table.bucket(index);
1363
0
1364
0
        // If we are replacing a DELETED entry then we don't need to update
1365
0
        // the load counter.
1366
0
        self.table.growth_left -= special_is_empty(old_ctrl) as usize;
1367
0
1368
0
        bucket.write(value);
1369
0
        self.table.items += 1;
1370
0
        bucket
1371
0
    }
1372
1373
    /// Temporarily removes a bucket, applying the given function to the removed
1374
    /// element and optionally putting the returned value back in the same bucket.
1375
    ///
1376
    /// Returns `true` if the bucket still contains an element.
1377
    ///
1378
    /// This does not check if the given bucket is actually occupied.
1379
    #[cfg_attr(feature = "inline-more", inline)]
1380
0
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1381
0
    where
1382
0
        F: FnOnce(T) -> Option<T>,
1383
0
    {
1384
0
        let index = self.bucket_index(&bucket);
1385
0
        let old_ctrl = *self.table.ctrl(index);
1386
0
        debug_assert!(self.is_bucket_full(index));
1387
0
        let old_growth_left = self.table.growth_left;
1388
0
        let item = self.remove(bucket).0;
1389
0
        if let Some(new_item) = f(item) {
1390
0
            self.table.growth_left = old_growth_left;
1391
0
            self.table.set_ctrl(index, old_ctrl);
1392
0
            self.table.items += 1;
1393
0
            self.bucket(index).write(new_item);
1394
0
            true
1395
        } else {
1396
0
            false
1397
        }
1398
0
    }
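A sketch of conditional in-place replacement (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |v: &u32| s.hash_one(*v);
    let mut t: RawTable<u32> = RawTable::new();
    let h = s.hash_one(10u32);
    t.insert(h, 10, hasher);

    let bucket = t.find(h, |v| *v == 10).unwrap();
    // SAFETY: `bucket` was just returned by `find` on this table, so it is
    // occupied. Returning `Some` re-fills the same slot; `None` removes it.
    let kept = unsafe { t.replace_bucket_with(bucket, |v| Some(v * 2)) };
    assert!(kept);
    assert_eq!(t.get(h, |v| *v == 20), Some(&20));
}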
1399
1400
    /// Searches for an element in the table. If the element is not found,
1401
    /// returns `Err` with the position of a slot where an element with the
1402
    /// same hash could be inserted.
1403
    ///
1404
    /// This function may resize the table if additional space is required for
1405
    /// inserting an element.
1406
    #[inline]
1407
0
    pub fn find_or_find_insert_slot(
1408
0
        &mut self,
1409
0
        hash: u64,
1410
0
        mut eq: impl FnMut(&T) -> bool,
1411
0
        hasher: impl Fn(&T) -> u64,
1412
0
    ) -> Result<Bucket<T>, InsertSlot> {
1413
0
        self.reserve(1, hasher);
1414
0
1415
0
        unsafe {
1416
0
            // SAFETY:
1417
0
            // 1. We know for sure that there is at least one empty `bucket` in the table.
1418
0
            // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1419
0
            //    never expose `RawTable::new_uninitialized` in a public API.
1420
0
            // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1421
0
            //    which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1422
0
            //    the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1423
0
            match self
1424
0
                .table
1425
0
                .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::find_or_find_insert_slot::<_, _>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, u32>::{closure#0}, hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>::{closure#0}
1426
            {
1427
                // SAFETY: See explanation above.
1428
0
                Ok(index) => Ok(self.bucket(index)),
1429
0
                Err(slot) => Err(slot),
1430
            }
1431
        }
1432
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::find_or_find_insert_slot::<_, _>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find_or_find_insert_slot::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, u32>::{closure#0}, hashbrown::map::make_hasher<gix_hash::object_id::ObjectId, u32, gix_hashtable::hash::Builder>::{closure#0}>
1433
1434
    /// Inserts a new element into the table in the given slot, and returns its
1435
    /// raw bucket.
1436
    ///
1437
    /// # Safety
1438
    ///
1439
    /// `slot` must point to a slot previously returned by
1440
    /// `find_or_find_insert_slot`, and no mutation of the table must have
1441
    /// occurred since that call.
1442
    #[inline]
1443
0
    pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1444
0
        let old_ctrl = *self.table.ctrl(slot.index);
1445
0
        self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1446
0
1447
0
        let bucket = self.bucket(slot.index);
1448
0
        bucket.write(value);
1449
0
        bucket
1450
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::insert_in_slot
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::insert_in_slot
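Together, `find_or_find_insert_slot` and `insert_in_slot` form the entry-style find-or-insert pattern; a sketch (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |kv: &(u32, u32)| s.hash_one(kv.0);
    let mut counts: RawTable<(u32, u32)> = RawTable::new();

    for key in [3u32, 3, 5] {
        let h = s.hash_one(key);
        match counts.find_or_find_insert_slot(h, |kv| kv.0 == key, hasher) {
            // SAFETY: the returned bucket is occupied.
            Ok(bucket) => unsafe { bucket.as_mut().1 += 1 },
            // SAFETY: the slot was just returned by
            // `find_or_find_insert_slot` and the table is unchanged since.
            Err(slot) => unsafe {
                counts.insert_in_slot(h, slot, (key, 1));
            },
        }
    }
    assert_eq!(counts.get(s.hash_one(3u32), |kv| kv.0 == 3), Some(&(3, 2)));
}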
1451
1452
    /// Searches for an element in the table.
1453
    #[inline]
1454
0
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1455
0
        unsafe {
1456
0
            // SAFETY:
1457
0
            // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1458
0
            //    will never expose `RawTable::new_uninitialized` in a public API.
1459
0
            // 1. The `find_inner` function returns the `index` of only the full bucket, which is in
1460
0
            //    the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1461
0
            //    is safe.
1462
0
            let result = self
1463
0
                .table
1464
0
                .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::find::<_>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::find::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::find::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, u32>::{closure#0}>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, u32>::{closure#0}>::{closure#0}
1465
0
1466
0
            // Avoid `Option::map` because it bloats LLVM IR.
1467
0
            match result {
1468
                // SAFETY: See explanation above.
1469
0
                Some(index) => Some(self.bucket(index)),
1470
0
                None => None,
1471
            }
1472
        }
1473
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::find::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::find::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::find::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, u32>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::find::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, u32>::{closure#0}>
1474
1475
    /// Gets a reference to an element in the table.
1476
    #[inline]
1477
0
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1478
0
        // Avoid `Option::map` because it bloats LLVM IR.
1479
0
        match self.find(hash, eq) {
1480
0
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
1481
0
            None => None,
1482
        }
1483
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::get::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::get::<hashbrown::map::equivalent_key<gix_hash::object_id::ObjectId, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::get::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>>::{closure#0}>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::get::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, u32>::{closure#0}>
1484
1485
    /// Gets a mutable reference to an element in the table.
1486
    #[inline]
1487
0
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1488
0
        // Avoid `Option::map` because it bloats LLVM IR.
1489
0
        match self.find(hash, eq) {
1490
0
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
1491
0
            None => None,
1492
        }
1493
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::get_mut::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::get_mut::<hashbrown::map::equivalent_key<gix_hash::borrowed::oid, gix_hash::object_id::ObjectId, u32>::{closure#0}>
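A lookup sketch (illustrative; `raw` feature assumed). Because only 7 bits of the hash live in the control bytes, the `eq` closure performs the authoritative comparison:

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |kv: &(u32, &str)| s.hash_one(kv.0);
    let mut t: RawTable<(u32, &str)> = RawTable::new();
    let h = s.hash_one(1u32);
    t.insert(h, (1, "one"), hasher);

    if let Some(kv) = t.get_mut(h, |kv| kv.0 == 1) {
        kv.1 = "uno";
    }
    assert_eq!(t.get(h, |kv| kv.0 == 1), Some(&(1, "uno")));
}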
1494
1495
    /// Attempts to get mutable references to `N` entries in the table at once.
1496
    ///
1497
    /// Returns an array of length `N` with the results of each query.
1498
    ///
1499
    /// At most one mutable reference will be returned to any entry. `None` will be returned if any
1500
    /// of the hashes are duplicates or if any of the hashes is not found.
1501
    ///
1502
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1503
    /// the `i`th key to be looked up.
1504
0
    pub fn get_many_mut<const N: usize>(
1505
0
        &mut self,
1506
0
        hashes: [u64; N],
1507
0
        eq: impl FnMut(usize, &T) -> bool,
1508
0
    ) -> Option<[&'_ mut T; N]> {
1509
        unsafe {
1510
0
            let ptrs = self.get_many_mut_pointers(hashes, eq)?;
1511
1512
0
            for (i, &cur) in ptrs.iter().enumerate() {
1513
0
                if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
1514
0
                    return None;
1515
0
                }
1516
            }
1517
            // All buckets are distinct from all previous buckets so we're clear to return the result
1518
            // of the lookup.
1519
1520
            // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1521
0
            Some(mem::transmute_copy(&ptrs))
1522
        }
1523
0
    }
1524
1525
0
    pub unsafe fn get_many_unchecked_mut<const N: usize>(
1526
0
        &mut self,
1527
0
        hashes: [u64; N],
1528
0
        eq: impl FnMut(usize, &T) -> bool,
1529
0
    ) -> Option<[&'_ mut T; N]> {
1530
0
        let ptrs = self.get_many_mut_pointers(hashes, eq)?;
1531
0
        Some(mem::transmute_copy(&ptrs))
1532
0
    }
1533
1534
0
    unsafe fn get_many_mut_pointers<const N: usize>(
1535
0
        &mut self,
1536
0
        hashes: [u64; N],
1537
0
        mut eq: impl FnMut(usize, &T) -> bool,
1538
0
    ) -> Option<[*mut T; N]> {
1539
0
        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
1540
0
        let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
1541
0
        let outs_ptr = outs.as_mut_ptr();
1542
1543
0
        for (i, &hash) in hashes.iter().enumerate() {
1544
0
            let cur = self.find(hash, |k| eq(i, k))?;
1545
0
            *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
1546
        }
1547
1548
        // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1549
0
        Some(outs.assume_init())
1550
0
    }
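A sketch of disjoint multi-entry mutation (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |kv: &(u32, u32)| s.hash_one(kv.0);
    let mut t: RawTable<(u32, u32)> = RawTable::new();
    for k in [1u32, 2] {
        t.insert(s.hash_one(k), (k, 0), hasher);
    }

    let keys = [1u32, 2];
    let hashes = [s.hash_one(1u32), s.hash_one(2u32)];
    // `eq(i, kv)` must recognize the `i`-th key; a duplicate or missing
    // entry makes the whole call return `None`.
    if let Some([a, b]) = t.get_many_mut(hashes, |i, kv| kv.0 == keys[i]) {
        a.1 = 10;
        b.1 = 20;
    }
    assert_eq!(t.get(hashes[0], |kv| kv.0 == 1), Some(&(1, 10)));
}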
1551
1552
    /// Returns the number of elements the map can hold without reallocating.
1553
    ///
1554
    /// This number is a lower bound; the table might be able to hold
1555
    /// more, but is guaranteed to be able to hold at least this many.
1556
    #[inline]
1557
0
    pub fn capacity(&self) -> usize {
1558
0
        self.table.items + self.table.growth_left
1559
0
    }
1560
1561
    /// Returns the number of elements in the table.
1562
    #[inline]
1563
0
    pub fn len(&self) -> usize {
1564
0
        self.table.items
1565
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::len
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::len
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::len
1566
1567
    /// Returns `true` if the table contains no elements.
1568
    #[inline]
1569
0
    pub fn is_empty(&self) -> bool {
1570
0
        self.len() == 0
1571
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::is_empty
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::is_empty
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::is_empty
1572
1573
    /// Returns the number of buckets in the table.
1574
    #[inline]
1575
0
    pub fn buckets(&self) -> usize {
1576
0
        self.table.bucket_mask + 1
1577
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _>>::buckets
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::buckets
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, u32)>>::buckets
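The relation between these getters follows from the load factor; a small sketch (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;

fn main() {
    let mut t: RawTable<u64> = RawTable::new();
    assert_eq!(t.capacity(), 0);

    // The hasher is only consulted when existing elements must move, so an
    // identity closure suffices while the table is empty.
    t.reserve(16, |v| *v);
    assert!(t.capacity() >= 16);
    // The load factor always keeps at least one bucket free.
    assert!(t.capacity() < t.buckets());
}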
1578
1579
    /// Checks whether the bucket at `index` is full.
1580
    ///
1581
    /// # Safety
1582
    ///
1583
    /// The caller must ensure `index` is less than the number of buckets.
1584
    #[inline]
1585
0
    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1586
0
        self.table.is_bucket_full(index)
1587
0
    }
1588
1589
    /// Returns an iterator over every element in the table. It is up to
1590
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1591
    /// Because we cannot make the `next` method unsafe on the `RawIter`
1592
    /// struct, we have to make the `iter` method unsafe.
1593
    #[inline]
1594
0
    pub unsafe fn iter(&self) -> RawIter<T> {
1595
0
        // SAFETY:
1596
0
        // 1. The caller must uphold the safety contract for `iter` method.
1597
0
        // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1598
0
        //    we will never expose RawTable::new_uninitialized in a public API.
1599
0
        self.table.iter()
1600
0
    }
1601
1602
    /// Returns an iterator over occupied buckets that could match a given hash.
1603
    ///
1604
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1605
    /// return items that have a hash value different from the one provided. You
1606
    /// should always validate the returned values before using them.
1607
    ///
1608
    /// It is up to the caller to ensure that the `RawTable` outlives the
1609
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1610
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1611
    #[cfg_attr(feature = "inline-more", inline)]
1612
    #[cfg(feature = "raw")]
1613
0
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1614
0
        RawIterHash::new(self, hash)
1615
0
    }
1616
1617
    /// Returns an iterator which removes all elements from the table without
1618
    /// freeing the memory.
1619
    #[cfg_attr(feature = "inline-more", inline)]
1620
0
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1621
0
        unsafe {
1622
0
            let iter = self.iter();
1623
0
            self.drain_iter_from(iter)
1624
0
        }
1625
0
    }
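A draining sketch (illustrative; `raw` feature assumed). Per the doc comment above, the elements are moved out while the allocation is retained:

use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let hasher = |v: &u32| s.hash_one(*v);
    let mut t: RawTable<u32> = RawTable::new();
    for v in [1u32, 2, 3] {
        t.insert(s.hash_one(v), v, hasher);
    }

    let cap = t.capacity();
    let mut drained: Vec<u32> = t.drain().collect();
    drained.sort_unstable();
    assert_eq!(drained, [1, 2, 3]);
    assert!(t.is_empty());
    assert_eq!(t.capacity(), cap); // memory kept for reuse
}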
1626
1627
    /// Returns an iterator which removes all elements from the table without
1628
    /// freeing the memory.
1629
    ///
1630
    /// Iteration starts at the provided iterator's current location.
1631
    ///
1632
    /// It is up to the caller to ensure that the iterator is valid for this
1633
    /// `RawTable` and covers all items that remain in the table.
1634
    #[cfg_attr(feature = "inline-more", inline)]
1635
0
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1636
0
        debug_assert_eq!(iter.len(), self.len());
1637
0
        RawDrain {
1638
0
            iter,
1639
0
            table: mem::replace(&mut self.table, RawTableInner::NEW),
1640
0
            orig_table: NonNull::from(&mut self.table),
1641
0
            marker: PhantomData,
1642
0
        }
1643
0
    }
1644
1645
    /// Returns an iterator which consumes all elements from the table.
1646
    ///
1647
    /// Iteration starts at the provided iterator's current location.
1648
    ///
1649
    /// It is up to the caller to ensure that the iterator is valid for this
1650
    /// `RawTable` and covers all items that remain in the table.
1651
0
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1652
0
        debug_assert_eq!(iter.len(), self.len());
1653
1654
0
        let allocation = self.into_allocation();
1655
0
        RawIntoIter {
1656
0
            iter,
1657
0
            allocation,
1658
0
            marker: PhantomData,
1659
0
        }
1660
0
    }
1661
1662
    /// Converts the table into a raw allocation. The contents of the table
1663
    /// should be dropped using a `RawIter` before freeing the allocation.
1664
    #[cfg_attr(feature = "inline-more", inline)]
1665
0
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1666
0
        let alloc = if self.table.is_empty_singleton() {
1667
0
            None
1668
        } else {
1669
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1670
0
            let (layout, ctrl_offset) =
1671
0
                match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1672
0
                    Some(lco) => lco,
1673
0
                    None => unsafe { hint::unreachable_unchecked() },
1674
                };
1675
0
            Some((
1676
0
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
1677
0
                layout,
1678
0
                unsafe { ptr::read(&self.alloc) },
1679
0
            ))
1680
        };
1681
0
        mem::forget(self);
1682
0
        alloc
1683
0
    }
1684
}
1685
1686
unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1687
where
1688
    T: Send,
1689
    A: Send,
1690
{
1691
}
1692
unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1693
where
1694
    T: Sync,
1695
    A: Sync,
1696
{
1697
}
1698
1699
impl RawTableInner {
1700
    const NEW: Self = RawTableInner::new();
1701
1702
    /// Creates a new empty hash table without allocating any memory.
1703
    ///
1704
    /// In effect this returns a table with exactly 1 bucket. However we can
1705
    /// leave the data pointer dangling since that bucket is never accessed
1706
    /// due to our load factor forcing us to always have at least 1 free bucket.
1707
    #[inline]
1708
0
    const fn new() -> Self {
1709
0
        Self {
1710
0
            // Be careful to cast the entire slice to a raw pointer.
1711
0
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
1712
0
            bucket_mask: 0,
1713
0
            items: 0,
1714
0
            growth_left: 0,
1715
0
        }
1716
0
    }
1717
}
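The empty-singleton invariants are observable through the public wrapper; a sketch (illustrative; `raw` feature assumed):

use hashbrown::raw::RawTable;

fn main() {
    // No allocation happens here: the control pointer refers to the
    // static empty group described above.
    let t: RawTable<u64> = RawTable::new();
    assert_eq!(t.buckets(), 1); // bucket_mask == 0
    assert_eq!(t.capacity(), 0); // growth_left == 0 forces a grow on insert
}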
1718
1719
impl RawTableInner {
1720
    /// Allocates a new [`RawTableInner`] with the given number of buckets.
1721
    /// The control bytes and buckets are left uninitialized.
1722
    ///
1723
    /// # Safety
1724
    ///
1725
    /// The caller of this function must ensure that `buckets` is a power of two
1726
    /// and also initialize all control bytes of the length `self.bucket_mask + 1 +
1727
    /// Group::WIDTH` with the [`EMPTY`] bytes.
1728
    ///
1729
    /// See also [`Allocator`] API for other safety concerns.
1730
    ///
1731
    /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1732
    #[cfg_attr(feature = "inline-more", inline)]
1733
0
    unsafe fn new_uninitialized<A>(
1734
0
        alloc: &A,
1735
0
        table_layout: TableLayout,
1736
0
        buckets: usize,
1737
0
        fallibility: Fallibility,
1738
0
    ) -> Result<Self, TryReserveError>
1739
0
    where
1740
0
        A: Allocator,
1741
0
    {
1742
0
        debug_assert!(buckets.is_power_of_two());
1743
1744
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1745
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1746
0
            Some(lco) => lco,
1747
0
            None => return Err(fallibility.capacity_overflow()),
1748
        };
1749
1750
0
        let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1751
0
            Ok(block) => block.cast(),
1752
0
            Err(_) => return Err(fallibility.alloc_err(layout)),
1753
        };
1754
1755
        // SAFETY: a null pointer will be caught in the check above
1756
0
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1757
0
        Ok(Self {
1758
0
            ctrl,
1759
0
            bucket_mask: buckets - 1,
1760
0
            items: 0,
1761
0
            growth_left: bucket_mask_to_capacity(buckets - 1),
1762
0
        })
1763
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::new_uninitialized::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::new_uninitialized::<hashbrown::raw::inner::alloc::inner::Global>
1764
1765
    /// Attempts to allocate a new [`RawTableInner`] with at least enough
1766
    /// capacity for inserting the given number of elements without reallocating.
1767
    ///
1768
    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1769
    #[inline]
1770
0
    fn fallible_with_capacity<A>(
1771
0
        alloc: &A,
1772
0
        table_layout: TableLayout,
1773
0
        capacity: usize,
1774
0
        fallibility: Fallibility,
1775
0
    ) -> Result<Self, TryReserveError>
1776
0
    where
1777
0
        A: Allocator,
1778
0
    {
1779
0
        if capacity == 0 {
1780
0
            Ok(Self::NEW)
1781
        } else {
1782
            // SAFETY: We checked that we could successfully allocate the new table, and then
1783
            // initialized all control bytes with the constant `EMPTY` byte.
1784
            unsafe {
1785
0
                let buckets =
1786
0
                    capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fallible_with_capacity::<_>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fallible_with_capacity::<hashbrown::raw::inner::alloc::inner::Global>::{closure#0}
1787
1788
0
                let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1789
                // SAFETY: We checked that the table is allocated and therefore the table already has
1790
                // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1791
                // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1792
0
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
1793
0
1794
0
                Ok(result)
1795
            }
1796
        }
1797
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fallible_with_capacity::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fallible_with_capacity::<hashbrown::raw::inner::alloc::inner::Global>
1798
1799
    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1800
    /// the given number of elements without reallocating.
1801
    ///
1802
    /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`]s the program
1803
    /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to
1804
    /// handle memory allocation failure.
1805
    ///
1806
    /// All the control bytes are initialized with the [`EMPTY`] bytes.
1807
    ///
1808
    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1809
    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1810
0
    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1811
0
    where
1812
0
        A: Allocator,
1813
0
    {
1814
0
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1815
0
        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1816
0
            Ok(table_inner) => table_inner,
1817
            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1818
0
            Err(_) => unsafe { hint::unreachable_unchecked() },
1819
        }
1820
0
    }
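A sketch of the public constructor that bottoms out in this path (illustrative; assumes the public `RawTable::with_capacity` constructor and the `raw` feature):

use hashbrown::raw::RawTable;

fn main() {
    let t: RawTable<u64> = RawTable::with_capacity(100);
    // At least the requested capacity, over a power-of-two bucket count.
    assert!(t.capacity() >= 100);
    assert!(t.buckets().is_power_of_two());
}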
1821
1822
    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1823
    ///
1824
    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1825
    /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately
1826
    /// trigger a match of [`RawTableInner::find_insert_slot_in_group`] function. This is because
1827
    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1828
    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1829
    /// We detect this situation here and perform a second scan starting at the beginning of the table.
1830
    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1831
    /// trailing control bytes (containing [`EMPTY`] bytes).
1832
    ///
1833
    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1834
    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1835
    /// `Safety`).
1836
    ///
1837
    /// # Warning
1838
    ///
1839
    /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1840
    /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1841
    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1842
    /// index will cause immediate [`undefined behavior`].
1843
    ///
1844
    /// # Safety
1845
    ///
1846
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1847
    /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
1848
    /// of this crate, the following rules are necessary and sufficient:
1849
    ///
1850
    /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1851
    ///   function results in [`undefined behavior`].
1852
    ///
1853
    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1854
    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
1855
    ///
1856
    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1857
    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1858
    ///
1859
    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1860
    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1861
    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1862
    ///
1863
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1864
    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1865
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1866
    #[inline]
1867
0
    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1868
0
        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1869
0
        if unlikely(self.is_bucket_full(index)) {
1870
0
            debug_assert!(self.bucket_mask < Group::WIDTH);
1871
            // SAFETY:
1872
            //
1873
            // * Since the caller of this function ensures that the control bytes are properly
1874
            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1875
            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1876
            //   and points to the properly initialized control bytes (see also
1877
            //   `TableLayout::calculate_layout_for` and `ptr::read`);
1878
            //
1879
            // * Because the caller of this function ensures that the index was provided by the
1880
            //   `self.find_insert_slot_in_group()` function, so for tables larger than the
1881
            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1882
            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1883
            //   cannot return a full bucket index. For tables smaller than the group width, calling
1884
            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1885
            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
1886
            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
1887
            //   the load factor) or hits the trailing control bytes (containing EMPTY).
1888
0
            index = Group::load_aligned(self.ctrl(0))
1889
0
                .match_empty_or_deleted()
1890
0
                .lowest_set_bit()
1891
0
                .unwrap_unchecked();
1892
0
        }
1893
0
        InsertSlot { index }
1894
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fix_insert_slot
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::fix_insert_slot
1895
1896
    /// Finds the position to insert something in a group.
1897
    ///
1898
    /// **This may have false positives and must be fixed up with `fix_insert_slot`
1899
    /// before it's used.**
1900
    ///
1901
    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
1902
    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
1903
    #[inline]
1904
0
    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1905
0
        let bit = group.match_empty_or_deleted().lowest_set_bit();
1906
0
1907
0
        if likely(bit.is_some()) {
1908
            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1909
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1910
0
            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1911
        } else {
1912
0
            None
1913
        }
1914
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_insert_slot_in_group
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_insert_slot_in_group
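The comment above leans on the power-of-two invariant; a standalone check of the mask-equals-modulo identity:

fn main() {
    let buckets: usize = 16; // always a power of two in this table
    let bucket_mask = buckets - 1;
    for pos in [0usize, 5, 15, 16, 31, 100] {
        // Masking with `buckets - 1` equals reducing modulo `buckets`.
        assert_eq!(pos & bucket_mask, pos % buckets);
    }
}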
1915
1916
    /// Searches for an element in the table, or a potential slot where that element could
1917
    /// be inserted (an empty or deleted [`Bucket`] index).
1918
    ///
1919
    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1920
    /// eliminated by LLVM optimizations.
1921
    ///
1922
    /// This function does not make any changes to the `data` part of the table, or any
1923
    /// changes to the `items` or `growth_left` field of the table.
1924
    ///
1925
    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1926
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1927
    /// will never return (will go into an infinite loop) for tables larger than the group
1928
    /// width, or return an index outside of the table indices range if the table is less
1929
    /// than the group width.
1930
    ///
1931
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1932
    /// function with only `FULL` buckets' indices and return the `index` of the found
1933
    /// element (as `Ok(index)`). If the element is not found and there is at least 1
1934
    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1935
    /// [InsertSlot] with an index in the range `0..self.buckets()`, but in any case,
1936
    /// if this function returns [`InsertSlot`], it will contain an index in the range
1937
    /// `0..=self.buckets()`.
1938
    ///
1939
    /// # Safety
1940
    ///
1941
    /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1942
    /// this function results in [`undefined behavior`].
1943
    ///
1944
    /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
1945
    /// less than the group width and if there was not at least one empty or deleted bucket in
1946
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1947
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
1948
    /// control bytes outside the table range.
1949
    ///
1950
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1951
    #[inline]
1952
0
    unsafe fn find_or_find_insert_slot_inner(
1953
0
        &self,
1954
0
        hash: u64,
1955
0
        eq: &mut dyn FnMut(usize) -> bool,
1956
0
    ) -> Result<usize, InsertSlot> {
1957
0
        let mut insert_slot = None;
1958
0
1959
0
        let h2_hash = h2(hash);
1960
0
        let mut probe_seq = self.probe_seq(hash);
1961
1962
        loop {
1963
            // SAFETY:
1964
            // * Caller of this function ensures that the control bytes are properly initialized.
1965
            //
1966
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1967
            //   of the table due to masking with `self.bucket_mask` and also because the number of
1968
            //   buckets is a power of two (see `self.probe_seq` function).
1969
            //
1970
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1971
            //   call `Group::load` due to the extended control bytes range, which is
1972
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1973
            //   byte will never be read for the allocated table);
1974
            //
1975
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1976
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1977
            //   bytes, which is safe (see RawTableInner::new).
1978
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1979
1980
0
            for bit in group.match_byte(h2_hash) {
1981
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
1982
0
1983
0
                if likely(eq(index)) {
1984
0
                    return Ok(index);
1985
0
                }
1986
            }
1987
1988
            // We didn't find the element we were looking for in the group, try to get an
1989
            // insertion slot from the group if we don't have one yet.
1990
0
            if likely(insert_slot.is_none()) {
1991
0
                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1992
0
            }
1993
1994
            // Only stop the search if the group contains at least one empty element.
1995
            // Otherwise, the element that we are looking for might be in a following group.
1996
0
            if likely(group.match_empty().any_bit_set()) {
1997
                // We must have found an insert slot by now, since the current group contains at
1998
                // least one. For tables smaller than the group width, there will still be an
1999
                // empty element in the current (and only) group due to the load factor.
2000
                unsafe {
2001
                    // SAFETY:
2002
                    // * Caller of this function ensures that the control bytes are properly initialized.
2003
                    //
2004
                    // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
2005
0
                    return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
2006
                }
2007
0
            }
2008
0
2009
0
            probe_seq.move_next(self.bucket_mask);
2010
        }
2011
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_or_find_insert_slot_inner
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_or_find_insert_slot_inner
2012
2013
    /// Searches for an empty or deleted bucket which is suitable for inserting a new
2014
    /// element and sets the hash for that slot. Returns an index of that slot and the
2015
    /// old control byte stored in the found index.
2016
    ///
2017
    /// This function does not check if the given element exists in the table. Also,
2018
    /// this function does not check if there is enough space in the table to insert
2019
    /// a new element. The caller of the function must ensure that the table has at
2020
    /// least 1 empty or deleted `bucket`, otherwise this function will never return
2021
    /// (will go into an infinite loop) for tables larger than the group width, or
2022
    /// return an index outside of the table indices range if the table is less than
2023
    /// the group width.
2024
    ///
2025
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
2026
    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
2027
    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
2028
    ///
2029
    /// This function does not make any changes to the `data` parts of the table,
2030
    /// or any changes to the `items` or `growth_left` field of the table.
2031
    ///
2032
    /// # Safety
2033
    ///
2034
    /// The safety rules are directly derived from the safety rules for the
2035
    /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods.
2036
    /// Thus, in order to uphold the safety contracts for that methods, as well as for
2037
    /// the correct logic of the work of this crate, you must observe the following rules
2038
    /// when calling this function:
2039
    ///
2040
    /// * The [`RawTableInner`] has already been allocated and has properly initialized
2041
    ///   control bytes otherwise calling this function results in [`undefined behavior`].
2042
    ///
2043
    /// * The caller of this function must ensure that the "data" parts of the table
2044
    ///   will have an entry in the returned index (matching the given hash) right
2045
    ///   after calling this function.
2046
    ///
2047
    /// Attempt to write data at the `index` returned by this function when the table is
2048
    /// less than the group width and if there was not at least one empty or deleted bucket in
2049
    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
2050
    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
2051
    /// control bytes outside the table range.
2052
    ///
2053
    /// The caller must independently increase the `items` field of the table, and also,
2054
    /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
2055
    /// field, and do not change it if the old control byte was [`DELETED`].
2056
    ///
2057
    /// See also [`Bucket::as_ptr`] method, for more information about of properly removing
2058
    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
2059
    ///
2060
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2061
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2062
    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
2063
    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2064
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2065
    #[inline]
2066
0
    unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
2067
0
        // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
2068
0
        let index: usize = self.find_insert_slot(hash).index;
2069
0
        // SAFETY:
2070
0
        // 1. The `find_insert_slot` function either returns an `index` less than or
2071
0
        //    equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
2072
0
        //    returns if it cannot find an empty or deleted slot.
2073
0
        // 2. The caller of this function guarantees that the table has already been
2074
0
        //    allocated
2075
0
        let old_ctrl = *self.ctrl(index);
2076
0
        self.set_ctrl_h2(index, hash);
2077
0
        (index, old_ctrl)
2078
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_insert_slot
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_insert_slot
2079
2080
    /// Searches for an empty or deleted bucket which is suitable for inserting
2081
    /// a new element, returning the `index` for the new [`Bucket`].
2082
    ///
2083
    /// This function does not make any changes to the `data` part of the table, or any
2084
    /// changes to the `items` or `growth_left` field of the table.
2085
    ///
2086
    /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
2087
    /// will never return (will go into an infinite loop) for tables larger than the group
2088
    /// width, or return an index outside of the table indices range if the table is less
2089
    /// than the group width.
2090
    ///
2091
    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
2092
    /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
2093
    /// but in any case, if this function returns [`InsertSlot`], it will contain an index
2094
    /// in the range `0..=self.buckets()`.
2095
    ///
2096
    /// # Safety
2097
    ///
2098
    /// The [`RawTableInner`] must have properly initialized control bytes, otherwise calling
2099
    /// this function results in [`undefined behavior`].
2100
    ///
2101
    /// Attempting to write data at the [`InsertSlot`] returned by this function when the table
2102
    /// has fewer buckets than the group width and there was not at least one empty or
2103
    /// deleted bucket in the table causes immediate [`undefined behavior`]: in this case
2104
    /// the function returns `self.bucket_mask + 1` as the index due to the trailing [`EMPTY`]
2105
    /// control bytes outside the table range.
2106
    ///
2107
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2108
    #[inline]
2109
0
    unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
2110
0
        let mut probe_seq = self.probe_seq(hash);
2111
        loop {
2112
            // SAFETY:
2113
            // * Caller of this function ensures that the control bytes are properly initialized.
2114
            //
2115
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
2116
            //   of the table due to masking with `self.bucket_mask` and also because the number of
2117
            //   buckets is a power of two (see `self.probe_seq` function).
2118
            //
2119
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2120
            //   call `Group::load` due to the extended control bytes range, which is
2121
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2122
            //   byte will never be read for the allocated table);
2123
            //
2124
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2125
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2126
            //   bytes, which is safe (see RawTableInner::new).
2127
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2128
0
2129
0
            let index = self.find_insert_slot_in_group(&group, &probe_seq);
2130
0
            if likely(index.is_some()) {
2131
                // SAFETY:
2132
                // * Caller of this function ensures that the control bytes are properly initialized.
2133
                //
2134
                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
2135
                unsafe {
2136
0
                    return self.fix_insert_slot(index.unwrap_unchecked());
2137
                }
2138
0
            }
2139
0
            probe_seq.move_next(self.bucket_mask);
2140
        }
2141
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_insert_slot
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::find_insert_slot
2142
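
As a sanity check on the termination argument above, this standalone sketch (toy sizes, not the real implementation) shows that the triangular probe sequence visits every group of a power-of-two table exactly once before repeating, so the loop can only spin forever when no group ever contains an empty or deleted slot:

// Triangular probing over a power-of-two number of groups is a permutation:
// strides 1, 2, 3, ... visit each group index exactly once per cycle.
// (The real `ProbeSeq` strides over bucket indices in multiples of
// `Group::WIDTH`; this sketch probes at group granularity, which is
// equivalent.)
fn main() {
    const GROUPS: usize = 8; // must be a power of two
    let mask = GROUPS - 1;
    let mut pos = 5; // stands in for `h1(hash) & mask`
    let mut stride = 0;
    let mut seen = [false; GROUPS];
    for _ in 0..GROUPS {
        seen[pos] = true;
        stride += 1;
        pos = (pos + stride) & mask;
    }
    assert!(seen.iter().all(|&v| v)); // every group visited exactly once
}
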
2143
    /// Searches for an element in a table, returning the `index` of the found element.
2144
    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
2145
    /// eliminated by LLVM optimizations.
2146
    ///
2147
    /// This function does not make any changes to the `data` part of the table, or any
2148
    /// changes to the `items` or `growth_left` field of the table.
2149
    ///
2150
    /// The table must have at least 1 empty `bucket`, otherwise, if the
2151
    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
2152
    /// this function will also never return (it will go into an infinite loop).
2153
    ///
2154
    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
2155
    /// function with only `FULL` buckets' indices and return the `index` of the found
2156
    /// element as `Some(index)`, so the index will always be in the range
2157
    /// `0..self.buckets()`.
2158
    ///
2159
    /// # Safety
2160
    ///
2161
    /// The [`RawTableInner`] must have properly initialized control bytes, otherwise calling
2162
    /// this function results in [`undefined behavior`].
2163
    ///
2164
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2165
    #[inline(always)]
2166
0
    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
2167
0
        let h2_hash = h2(hash);
2168
0
        let mut probe_seq = self.probe_seq(hash);
2169
2170
        loop {
2171
            // SAFETY:
2172
            // * Caller of this function ensures that the control bytes are properly initialized.
2173
            //
2174
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
2175
            //   of the table due to masking with `self.bucket_mask`.
2176
            //
2177
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2178
            //   call `Group::load` due to the extended control bytes range, which is
2179
            //  `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2180
            //   byte will never be read for the allocated table);
2181
            //
2182
            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2183
            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2184
            //   bytes, which is safe (see RawTableInner::new_in).
2185
0
            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2186
2187
0
            for bit in group.match_byte(h2_hash) {
2188
                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
2189
                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2190
0
                let index = (probe_seq.pos + bit) & self.bucket_mask;
2191
0
2192
0
                if likely(eq(index)) {
2193
0
                    return Some(index);
2194
0
                }
2195
            }
2196
2197
0
            if likely(group.match_empty().any_bit_set()) {
2198
0
                return None;
2199
0
            }
2200
0
2201
0
            probe_seq.move_next(self.bucket_mask);
2202
        }
2203
0
    }
2204
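
The lookup above combines two projections of the same hash. The scalar sketch below is a simplified stand-in for the SIMD group matching (the bit widths match hashbrown's scheme, but the 4-byte group and hash value are made up): the 7-bit `h2` fingerprint narrows candidates cheaply before the full `eq` check runs on each matching index.

// h1 selects the starting group; h2 is a 7-bit fingerprint stored in the
// control byte (the high bit is reserved for the EMPTY/DELETED states).
fn h1(hash: u64) -> usize {
    hash as usize
}

fn h2(hash: u64) -> u8 {
    (hash >> (64 - 7)) as u8 // top 7 bits, high bit always clear
}

// Scalar equivalent of `Group::match_byte`: indices whose control byte
// equals the fingerprint.
fn match_byte(group: &[u8], fingerprint: u8) -> impl Iterator<Item = usize> + '_ {
    group
        .iter()
        .enumerate()
        .filter(move |&(_, &ctrl)| ctrl == fingerprint)
        .map(|(i, _)| i)
}

fn main() {
    let hash: u64 = 0xDEAD_BEEF_CAFE_F00D;
    let group = [0x1B, h2(hash), 0x7F, h2(hash)]; // toy 4-wide group
    let candidates: Vec<usize> = match_byte(&group, h2(hash)).collect();
    assert_eq!(candidates, vec![1, 3]); // only these run the full `eq` check
    let _ = h1(hash); // would be masked with `bucket_mask` to pick the group
}
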
2205
    /// Prepares for rehashing data in place (that is, without allocating new memory).
2206
    /// Converts all `FULL` control bytes to `DELETED` and all `DELETED` control
2207
    /// bytes to `EMPTY`, i.e. performs the following conversion:
2208
    ///
2209
    /// - `EMPTY` control bytes   -> `EMPTY`;
2210
    /// - `DELETED` control bytes -> `EMPTY`;
2211
    /// - `FULL` control bytes    -> `DELETED`.
2212
    ///
2213
    /// This function does not make any changes to the `data` parts of the table,
2214
    /// or any changes to the `items` or `growth_left` field of the table.
2215
    ///
2216
    /// # Safety
2217
    ///
2218
    /// You must observe the following safety rules when calling this function:
2219
    ///
2220
    /// * The [`RawTableInner`] has already been allocated;
2221
    ///
2222
    /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
2223
    ///   bytes when re-inserting them into their ideal position (which was impossible
2224
    ///   to do during the first insert due to tombstones). If the caller does not do
2225
    ///   this, then calling this function may result in a memory leak.
2226
    ///
2227
    /// * The [`RawTableInner`] must have properly initialized control bytes, otherwise
2228
    ///   calling this function results in [`undefined behavior`].
2229
    ///
2230
    /// Calling this function on a table that has not been allocated results in
2231
    /// [`undefined behavior`].
2232
    ///
2233
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2234
    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2235
    ///
2236
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2237
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2238
    #[allow(clippy::mut_mut)]
2239
    #[inline]
2240
0
    unsafe fn prepare_rehash_in_place(&mut self) {
2241
        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
2242
        // This effectively frees up all buckets containing a DELETED entry.
2243
        //
2244
        // SAFETY:
2245
        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
2246
        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
2247
        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
2248
        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
2249
        // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
2250
        //    and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
2251
0
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
2252
0
            let group = Group::load_aligned(self.ctrl(i));
2253
0
            let group = group.convert_special_to_empty_and_full_to_deleted();
2254
0
            group.store_aligned(self.ctrl(i));
2255
0
        }
2256
2257
        // Fix up the trailing control bytes. See the comments in set_ctrl
2258
        // for the handling of tables smaller than the group width.
2259
        //
2260
        // SAFETY: The caller of this function guarantees that [`RawTableInner`]
2261
        // has already been allocated
2262
0
        if unlikely(self.buckets() < Group::WIDTH) {
2263
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
2264
0
            // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
2265
0
            // `Group::WIDTH` is safe
2266
0
            self.ctrl(0)
2267
0
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
2268
0
        } else {
2269
0
            // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
2270
0
            // control bytes,so copying `Group::WIDTH` bytes with offset equal
2271
0
            // to `self.buckets() == self.bucket_mask + 1` is safe
2272
0
            self.ctrl(0)
2273
0
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2274
0
        }
2275
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_rehash_in_place
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_rehash_in_place
2276
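
The group-wide conversion above can be stated byte by byte. Here is a scalar sketch (the constants mirror hashbrown's encoding, where a set high bit marks a special state; the function name copies the real one for readability, but this is not the SIMD implementation):

// FULL (high bit clear) -> DELETED; EMPTY and DELETED (high bit set) -> EMPTY.
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

fn convert_special_to_empty_and_full_to_deleted(ctrl: u8) -> u8 {
    if ctrl & 0x80 != 0 { EMPTY } else { DELETED }
}

fn main() {
    assert_eq!(convert_special_to_empty_and_full_to_deleted(EMPTY), EMPTY);
    assert_eq!(convert_special_to_empty_and_full_to_deleted(DELETED), EMPTY);
    // Any FULL byte (a 7-bit h2 fingerprint) becomes a DELETED tombstone.
    assert_eq!(convert_special_to_empty_and_full_to_deleted(0x2A), DELETED);
}
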
2277
    /// Returns an iterator over every element in the table.
2278
    ///
2279
    /// # Safety
2280
    ///
2281
    /// If any of the following conditions are violated, the result
2282
    /// is [`undefined behavior`]:
2283
    ///
2284
    /// * The caller has to ensure that the `RawTableInner` outlives the
2285
    ///   `RawIter`. Because we cannot make the `next` method unsafe on
2286
    ///   the `RawIter` struct, we have to make the `iter` method unsafe.
2287
    ///
2288
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2289
    ///
2290
    /// The type `T` must be the actual type of the elements stored in the table,
2291
    /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2292
    ///
2293
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2294
    #[inline]
2295
0
    unsafe fn iter<T>(&self) -> RawIter<T> {
2296
0
        // SAFETY:
2297
0
        // 1. Since the caller of this function ensures that the control bytes
2298
0
        //    are properly initialized and `self.data_end()` points to the start
2299
0
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2300
0
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2301
0
        //    control bytes.
2302
0
        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2303
0
        //    equal to zero).
2304
0
        // 3. We pass the exact value of buckets of the table to the function.
2305
0
        //
2306
0
        //                         `ctrl` points here (to the start
2307
0
        //                         of the first control byte `CT0`)
2308
0
        //                          ∨
2309
0
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2310
0
        //                           \________  ________/
2311
0
        //                                    \/
2312
0
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2313
0
        //
2314
0
        // where: T0...T_n  - our stored data;
2315
0
        //        CT0...CT_n - control bytes or metadata for `data`.
2316
0
        //        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2317
0
        //                        with loading `Group` bytes from the heap works properly, even if the result
2318
0
        //                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2319
0
        //                        `RawTableInner::set_ctrl` function.
2320
0
        //
2321
0
        // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2322
0
        // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2323
0
        let data = Bucket::from_base_index(self.data_end(), 0);
2324
0
        RawIter {
2325
0
            // SAFETY: See explanation above
2326
0
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2327
0
            items: self.items,
2328
0
        }
2329
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::iter::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::iter::<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::iter::<(gix_hash::object_id::ObjectId, u32)>
2330
2331
    /// Executes the destructors (if any) of the values stored in the table.
2332
    ///
2333
    /// # Note
2334
    ///
2335
    /// This function does not erase the control bytes of the table and does
2336
    /// not make any changes to the `items` or `growth_left` fields of the
2337
    /// table. If necessary, the caller of this function must manually set
2338
    /// up these table fields, for example using the [`clear_no_drop`] function.
2339
    ///
2340
    /// Be careful when calling this function, because the drop function of
2341
    /// the elements can panic, and this can leave the table in an inconsistent
2342
    /// state.
2343
    ///
2344
    /// # Safety
2345
    ///
2346
    /// The type `T` must be the actual type of the elements stored in the table,
2347
    /// otherwise calling this function may result in [`undefined behavior`].
2348
    ///
2349
    /// If `T` is a type that should be dropped and **the table is not empty**,
2350
    /// calling this function more than once results in [`undefined behavior`].
2351
    ///
2352
    /// If `T` is not [`Copy`], attempting to use values stored in the table after
2353
    /// calling this function may result in [`undefined behavior`].
2354
    ///
2355
    /// It is safe to call this function on a table that has not been allocated,
2356
    /// on a table with uninitialized control bytes, and on a table with no actual
2357
    /// data but with `Full` control bytes if `self.items == 0`.
2358
    ///
2359
    /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
2360
    /// about properly removing or saving an `element` from / into the [`RawTable`] /
2361
    /// [`RawTableInner`].
2362
    ///
2363
    /// [`Bucket::drop`]: Bucket::drop
2364
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2365
    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2366
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2367
0
    unsafe fn drop_elements<T>(&mut self) {
2368
0
        // Check that `self.items != 0`. Protects against the possibility
2369
0
        // of creating an iterator on a table with uninitialized control bytes.
2370
0
        if T::NEEDS_DROP && self.items != 0 {
2371
            // SAFETY: We know for sure that RawTableInner will outlive the
2372
            // returned `RawIter` iterator, and the caller of this function
2373
            // must uphold the safety contract for `drop_elements` method.
2374
0
            for item in self.iter::<T>() {
2375
0
                // SAFETY: The caller must uphold the safety contract for
2376
0
                // `drop_elements` method.
2377
0
                item.drop();
2378
0
            }
2379
0
        }
2380
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::drop_elements::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::drop_elements::<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::drop_elements::<(gix_hash::object_id::ObjectId, u32)>
2381
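
The `T::NEEDS_DROP` check above lets the whole drop loop vanish at compile time for types without destructors. A minimal sketch of the same idea using the stable `core::mem::needs_drop` (the crate's `T::NEEDS_DROP` constant carries the same information):

use core::mem::needs_drop;

// Runs destructors only when the element type actually has any.
fn drop_all<T>(items: &mut Vec<T>) {
    if needs_drop::<T>() && !items.is_empty() {
        items.clear(); // drops every element
    }
}

fn main() {
    let mut strings = vec![String::from("a"), String::from("b")];
    drop_all(&mut strings); // String has a destructor, so drops run here
    assert!(strings.is_empty());
    assert!(!needs_drop::<u64>()); // for u64 the branch is statically false
}
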
2382
    /// Executes the destructors (if any) of the values stored in the table and then
2383
    /// deallocates the table.
2384
    ///
2385
    /// # Note
2386
    ///
2387
    /// Calling this function invalidates (makes dangling) all instances of
2388
    /// buckets ([`Bucket`]) as well as the `ctrl` field of the table.
2389
    ///
2390
    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2391
    /// fields of the table. If necessary, the caller of this function must manually set
2392
    /// up these table fields.
2393
    ///
2394
    /// # Safety
2395
    ///
2396
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2397
    ///
2398
    /// * Calling this function more than once;
2399
    ///
2400
    /// * The type `T` must be the actual type of the elements stored in the table.
2401
    ///
2402
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2403
    ///   to allocate this table.
2404
    ///
2405
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2406
    ///   was used to allocate this table.
2407
    ///
2408
    /// The caller of this function should pay attention to the possibility of the
2409
    /// elements' drop function panicking, because this:
2410
    ///
2411
    ///    * May leave the table in an inconsistent state;
2412
    ///
2413
    ///    * May cause memory to never be deallocated, resulting in a memory leak.
2414
    ///
2415
    /// Attempting to use (dereference) the `ctrl` field of the table after calling this
2416
    /// function results in [`undefined behavior`].
2417
    ///
2418
    /// It is safe to call this function on a table that has not been allocated,
2419
    /// on a table with uninitialized control bytes, and on a table with no actual
2420
    /// data but with `Full` control bytes if `self.items == 0`.
2421
    ///
2422
    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2423
    /// for more information.
2424
    ///
2425
    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2426
    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2427
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2428
0
    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2429
0
        if !self.is_empty_singleton() {
2430
0
            unsafe {
2431
0
                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2432
0
                self.drop_elements::<T>();
2433
0
                // SAFETY:
2434
0
                // 1. We have checked that our table is allocated.
2435
0
                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2436
0
                self.free_buckets(alloc, table_layout);
2437
0
            }
2438
0
        }
2439
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::drop_inner_table::<_, _>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::drop_inner_table::<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>), hashbrown::raw::inner::alloc::inner::Global>
2440
2441
    /// Returns a pointer to an element in the table (convenience for
2442
    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2443
    ///
2444
    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2445
    /// otherwise using it may result in [`undefined behavior`].
2446
    ///
2447
    /// # Safety
2448
    ///
2449
    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2450
    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2451
    /// this function, the following safety rules must be observed:
2452
    ///
2453
    /// * The table must already be allocated;
2454
    ///
2455
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2456
    ///   function, i.e. `(index + 1) <= self.buckets()`.
2457
    ///
2458
    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2459
    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
2460
    ///
2461
    /// It is safe to call this function with index of zero (`index == 0`) on a table that has
2462
    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2463
    ///
2464
    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2465
    /// be less than the number returned by the [`RawTableInner::buckets`] function, i.e.
2466
    /// `(index + 1) <= self.buckets()`.
2467
    ///
2468
    /// ```none
2469
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2470
    /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
2471
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2472
    ///
2473
    ///           `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2474
    ///           part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2475
    ///                  |
2476
    ///                  |               `base = table.data_end::<T>()` points here
2477
    ///                  |               (to the start of CT0 or to the end of T0)
2478
    ///                  v                 v
2479
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2480
    ///                     ^                                              \__________  __________/
2481
    ///        `table.bucket(3)` returns a pointer that points                        \/
2482
    ///         here in the `data` part of the `RawTableInner`             additional control bytes
2483
    ///         (to the end of T3)                                          `m = Group::WIDTH - 1`
2484
    ///
2485
    /// where: T0...T_n  - our stored data;
2486
    ///        CT0...CT_n - control bytes or metadata for `data`;
2487
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2488
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2489
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2490
    ///
2491
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2492
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2493
    /// ```
2494
    ///
2495
    /// [`Bucket::from_base_index`]: Bucket::from_base_index
2496
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2497
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2498
    #[inline]
2499
0
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2500
0
        debug_assert_ne!(self.bucket_mask, 0);
2501
0
        debug_assert!(index < self.buckets());
2502
0
        Bucket::from_base_index(self.data_end(), index)
2503
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::bucket::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::bucket::<(gix_hash::object_id::ObjectId, u32)>
2504
2505
    /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2506
    /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2507
    ///
2508
    /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2509
    /// otherwise using it may result in [`undefined behavior`].
2510
    ///
2511
    /// # Safety
2512
    ///
2513
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2514
    ///
2515
    /// * The table must already be allocated;
2516
    ///
2517
    /// * The `index` must be less than the number returned by the [`RawTableInner::buckets`]
2518
    ///   function, i.e. `(index + 1) <= self.buckets()`;
2519
    ///
2520
    /// * The `size_of` must be equal to the size of the elements stored in the table;
2521
    ///
2522
    /// ```none
2523
    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2524
    /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
2525
    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2526
    ///
2527
    ///           `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2528
    ///           `data` part of the `RawTableInner`, i.e. to the start of T3
2529
    ///                  |
2530
    ///                  |               `base = table.data_end::<u8>()` points here
2531
    ///                  |               (to the start of CT0 or to the end of T0)
2532
    ///                  v                 v
2533
    /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2534
    ///                                                                    \__________  __________/
2535
    ///                                                                               \/
2536
    ///                                                                    additional control bytes
2537
    ///                                                                     `m = Group::WIDTH - 1`
2538
    ///
2539
    /// where: T0...T_n  - our stored data;
2540
    ///        CT0...CT_n - control bytes or metadata for `data`;
2541
    ///        CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2542
    ///                        the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2543
    ///                        is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2544
    ///
2545
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2546
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2547
    /// ```
2548
    ///
2549
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2550
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2551
    #[inline]
2552
0
    unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2553
0
        debug_assert_ne!(self.bucket_mask, 0);
2554
0
        debug_assert!(index < self.buckets());
2555
0
        let base: *mut u8 = self.data_end().as_ptr();
2556
0
        base.sub((index + 1) * size_of)
2557
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::bucket_ptr
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::bucket_ptr
2558
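
Because elements are laid out in reverse order just below the control bytes, the pointer math above reduces to a fixed offset from `data_end`. A small illustration with assumed toy numbers (byte offsets within a hypothetical allocation, not real pointers):

use core::mem::size_of;

// Bucket `i` starts `(i + 1) * size_of::<T>()` bytes before `data_end`.
fn bucket_offset_from_end<T>(index: usize) -> usize {
    (index + 1) * size_of::<T>()
}

fn main() {
    // With T = u64 and `data_end` at byte offset 64, bucket 0 occupies
    // bytes [56, 64) and bucket 3 occupies bytes [32, 40).
    let data_end = 64usize;
    assert_eq!(data_end - bucket_offset_from_end::<u64>(0), 56);
    assert_eq!(data_end - bucket_offset_from_end::<u64>(3), 32);
}
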
2559
    /// Returns a pointer to one past the last `data` element in the table as viewed from
2560
    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2561
    ///
2562
    /// This function actually returns a pointer to the end of the `data element` at
2563
    /// index "0" (zero).
2564
    ///
2565
    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2566
    /// otherwise using it may result in [`undefined behavior`].
2567
    ///
2568
    /// # Note
2569
    ///
2570
    /// The type `T` must be the actual type of the elements stored in the table, otherwise
2571
    /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2572
    ///
2573
    /// ```none
2574
    ///                        `table.data_end::<T>()` returns pointer that points here
2575
    ///                        (to the end of `T0`)
2576
    ///                          ∨
2577
    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2578
    ///                           \________  ________/
2579
    ///                                    \/
2580
    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2581
    ///
2582
    /// where: T0...T_n  - our stored data;
2583
    ///        CT0...CT_n - control bytes or metadata for `data`.
2584
    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2585
    ///                        with loading `Group` bytes from the heap works properly, even if the result
2586
    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2587
    ///                        `RawTableInner::set_ctrl` function.
2588
    ///
2589
    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2590
    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2591
    /// ```
2592
    ///
2593
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2594
    #[inline]
2595
0
    fn data_end<T>(&self) -> NonNull<T> {
2596
0
        self.ctrl.cast()
2597
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::data_end::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::data_end::<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::data_end::<(gix_hash::object_id::ObjectId, u32)>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::data_end::<u8>
2598
2599
    /// Returns an iterator-like object for a probe sequence on the table.
2600
    ///
2601
    /// This iterator never terminates, but is guaranteed to visit each bucket
2602
    /// group exactly once. The loop using `probe_seq` must terminate upon
2603
    /// reaching a group containing an empty bucket.
2604
    #[inline]
2605
0
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
2606
0
        ProbeSeq {
2607
0
            // This is the same as `hash as usize % self.buckets()` because the number
2608
0
            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2609
0
            pos: h1(hash) & self.bucket_mask,
2610
0
            stride: 0,
2611
0
        }
2612
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::probe_seq
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::probe_seq
2613
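
The comment above leans on a standard identity: for a power-of-two bucket count, masking with `bucket_mask` equals taking the remainder. A tiny self-contained check with illustrative values:

fn main() {
    let buckets = 16usize; // power of two
    let bucket_mask = buckets - 1;
    for hash in [0u64, 7, 16, 12345, u64::MAX] {
        assert_eq!((hash as usize) & bucket_mask, (hash as usize) % buckets);
    }
}
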
2614
    /// Returns the index of a bucket into which a value should be inserted if there is enough room
2615
    /// in the table, otherwise returns an error.
2616
    #[cfg(feature = "raw")]
2617
    #[inline]
2618
0
    unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
2619
0
        let index = self.find_insert_slot(hash).index;
2620
0
        let old_ctrl = *self.ctrl(index);
2621
0
        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
2622
0
            Err(())
2623
        } else {
2624
0
            self.record_item_insert_at(index, old_ctrl, hash);
2625
0
            Ok(index)
2626
        }
2627
0
    }
2628
2629
    #[inline]
2630
0
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
2631
0
        self.growth_left -= usize::from(special_is_empty(old_ctrl));
2632
0
        self.set_ctrl_h2(index, hash);
2633
0
        self.items += 1;
2634
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::record_item_insert_at
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::record_item_insert_at
2635
2636
    #[inline]
2637
0
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2638
0
        let probe_seq_pos = self.probe_seq(hash).pos;
2639
0
        let probe_index =
2640
0
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_in_same_group::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_in_same_group::{closure#0}
2641
0
        probe_index(i) == probe_index(new_i)
2642
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_in_same_group
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_in_same_group
2643
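
`is_in_same_group` asks whether two bucket indices fall within the same group of a given probe sequence. A standalone check with assumed toy sizes (a 64-bucket table and 8-wide groups):

fn main() {
    const GROUP_WIDTH: usize = 8;
    let bucket_mask = 63usize; // 64 buckets
    let probe_start = 60usize; // stands in for `self.probe_seq(hash).pos`
    let probe_index =
        |pos: usize| (pos.wrapping_sub(probe_start) & bucket_mask) / GROUP_WIDTH;
    // Indices 61 and 3 both lie in the first (wrapped) group from 60.
    assert_eq!(probe_index(61), probe_index(3));
    // Index 12 lies in a later group of the same probe sequence.
    assert_ne!(probe_index(61), probe_index(12));
}
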
2644
    /// Sets a control byte to the hash, and possibly also the replicated control byte at
2645
    /// the end of the array.
2646
    ///
2647
    /// This function does not make any changes to the `data` parts of the table,
2648
    /// or any changes to the `items` or `growth_left` field of the table.
2649
    ///
2650
    /// # Safety
2651
    ///
2652
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2653
    /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2654
    /// following rules when calling this function:
2655
    ///
2656
    /// * The [`RawTableInner`] has already been allocated;
2657
    ///
2658
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2659
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2660
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2661
    ///
2662
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2663
    ///
2664
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2665
    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2666
    ///
2667
    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2668
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2669
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2670
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2671
    #[inline]
2672
0
    unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
2673
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
2674
0
        self.set_ctrl(index, h2(hash));
2675
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::set_ctrl_h2
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::set_ctrl_h2
2676
2677
    /// Replaces the hash in the control byte at the given index with the provided one,
2678
    /// and possibly also replicates the new control byte at the end of the array of control
2679
    /// bytes, returning the old control byte.
2680
    ///
2681
    /// This function does not make any changes to the `data` parts of the table,
2682
    /// or any changes to the `items` or `growth_left` field of the table.
2683
    ///
2684
    /// # Safety
2685
    ///
2686
    /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
2687
    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2688
    /// methods, you must observe the following rules when calling this function:
2689
    ///
2690
    /// * The [`RawTableInner`] has already been allocated;
2691
    ///
2692
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2693
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2694
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2695
    ///
2696
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2697
    ///
2698
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2699
    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2700
    ///
2701
    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2702
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2703
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2704
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2705
    #[inline]
2706
0
    unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
2707
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
2708
0
        let prev_ctrl = *self.ctrl(index);
2709
0
        self.set_ctrl_h2(index, hash);
2710
0
        prev_ctrl
2711
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::replace_ctrl_h2
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::replace_ctrl_h2
2712
2713
    /// Sets a control byte, and possibly also the replicated control byte at
2714
    /// the end of the array.
2715
    ///
2716
    /// This function does not make any changes to the `data` parts of the table,
2717
    /// or any changes to the `items` or `growth_left` field of the table.
2718
    ///
2719
    /// # Safety
2720
    ///
2721
    /// You must observe the following safety rules when calling this function:
2722
    ///
2723
    /// * The [`RawTableInner`] has already been allocated;
2724
    ///
2725
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2726
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2727
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
2728
    ///
2729
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2730
    ///
2731
    /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2732
    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2733
    ///
2734
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
2735
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
2736
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2737
    #[inline]
2738
0
    unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
2739
0
        // Replicate the first Group::WIDTH control bytes at the end of
2740
0
        // the array without using a branch. If the table is smaller than
2741
0
        // the group width (self.buckets() < Group::WIDTH),
2742
0
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2743
0
        //
2744
0
        // - If index >= Group::WIDTH then index == index2.
2745
0
        // - Otherwise index2 == self.bucket_mask + 1 + index.
2746
0
        //
2747
0
        // The very last replicated control byte is never actually read because
2748
0
        // we mask the initial index for unaligned loads, but we write it
2749
0
        // anyways because it makes the set_ctrl implementation simpler.
2750
0
        //
2751
0
        // If there are fewer buckets than Group::WIDTH then this code will
2752
0
        // replicate the buckets at the end of the trailing group. For example
2753
0
        // with 2 buckets and a group size of 4, the control bytes will look
2754
0
        // like this:
2755
0
        //
2756
0
        //     Real    |             Replicated
2757
0
        // ---------------------------------------------
2758
0
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2759
0
        // ---------------------------------------------
2760
0
2761
0
        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2762
0
        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2763
0
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2764
0
2765
0
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2766
0
        *self.ctrl(index) = ctrl;
2767
0
        *self.ctrl(index2) = ctrl;
2768
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::set_ctrl
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::set_ctrl
2769
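
The mirroring formula in `set_ctrl` can be checked in isolation. The sketch below uses a toy group width and bucket counts (not the real constants) to reproduce the replicated-bytes picture from the comment above:

fn main() {
    const GROUP_WIDTH: usize = 4;
    // Small table: 2 buckets, so every index is mirrored past the end.
    let bucket_mask = 1usize;
    let index2 = |index: usize| {
        (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
    };
    assert_eq!(index2(0), 4); // | A | B | EMPTY | EMPTY | A | B |
    assert_eq!(index2(1), 5);

    // Large table: 256 buckets; interior indices map to themselves and
    // only the first GROUP_WIDTH bytes are replicated past the end.
    let bucket_mask = 255usize;
    let index2 =
        |index: usize| (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH;
    assert_eq!(index2(100), 100);
    assert_eq!(index2(0), 256);
}
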
2770
    /// Returns a pointer to a control byte.
2771
    ///
2772
    /// # Safety
2773
    ///
2774
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
2775
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
2776
    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
2777
    /// returns a pointer to the end of the allocated table, which is useless on its own.
2778
    ///
2779
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
2780
    /// table that has not been allocated results in [`Undefined Behavior`].
2781
    ///
2782
    /// So to satisfy both requirements you should always follow the rule that
2783
    /// `index < self.bucket_mask + 1 + Group::WIDTH`
2784
    ///
2785
    /// Calling this function on a [`RawTableInner`] that has not yet been allocated is safe
2786
    /// for read-only purposes.
2787
    ///
2788
    /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
2789
    /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2790
    ///
2791
    /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2792
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2793
    #[inline]
2794
0
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
2795
0
        debug_assert!(index < self.num_ctrl_bytes());
2796
        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2797
0
        self.ctrl.as_ptr().add(index)
2798
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::ctrl
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::ctrl
2799
2800
    #[inline]
2801
0
    fn buckets(&self) -> usize {
2802
0
        self.bucket_mask + 1
2803
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::buckets
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::buckets
2804
2805
    /// Checks whether the bucket at `index` is full.
2806
    ///
2807
    /// # Safety
2808
    ///
2809
    /// The caller must ensure `index` is less than the number of buckets.
2810
    #[inline]
2811
0
    unsafe fn is_bucket_full(&self, index: usize) -> bool {
2812
0
        debug_assert!(index < self.buckets());
2813
0
        is_full(*self.ctrl(index))
2814
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_bucket_full
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_bucket_full
2815
2816
    #[inline]
2817
0
    fn num_ctrl_bytes(&self) -> usize {
2818
0
        self.bucket_mask + 1 + Group::WIDTH
2819
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::num_ctrl_bytes
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::num_ctrl_bytes
2820
2821
    #[inline]
2822
0
    fn is_empty_singleton(&self) -> bool {
2823
0
        self.bucket_mask == 0
2824
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_empty_singleton
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::is_empty_singleton
2825
2826
    /// Attempts to allocate a new hash table with at least enough capacity
2827
    /// for inserting the given number of elements without reallocating,
2828
    /// and returns it inside a `ScopeGuard` to protect against a panic in the hash
2829
    /// function.
2830
    ///
2831
    /// # Note
2832
    ///
2833
    /// It is recommended (but not required):
2834
    ///
2835
    /// * That the new table's `capacity` be greater than or equal to `self.items`.
2836
    ///
2837
    /// * That the `alloc` be the same [`Allocator`] as the `Allocator` used
2838
    ///   to allocate this table.
2839
    ///
2840
    /// * That the `table_layout` be the same [`TableLayout`] as the `TableLayout` used
2841
    ///   to allocate this table.
2842
    ///
2843
    /// If `table_layout` does not match the `TableLayout` that was used to allocate
2844
    /// this table, then using `mem::swap` with the `self` and the new table returned
2845
    /// by this function results in [`undefined behavior`].
2846
    ///
2847
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2848
    #[allow(clippy::mut_mut)]
2849
    #[inline]
2850
0
    fn prepare_resize<'a, A>(
2851
0
        &self,
2852
0
        alloc: &'a A,
2853
0
        table_layout: TableLayout,
2854
0
        capacity: usize,
2855
0
        fallibility: Fallibility,
2856
0
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2857
0
    where
2858
0
        A: Allocator,
2859
0
    {
2860
0
        debug_assert!(self.items <= capacity);
2861
2862
        // Allocate and initialize the new table.
2863
0
        let new_table =
2864
0
            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2865
2866
        // The hash function may panic, in which case we simply free the new
2867
        // table without dropping any elements that may have been copied into
2868
        // it.
2869
        //
2870
        // This guard is also used to free the old table on success, see
2871
        // the comment at the bottom of this function.
2872
0
        Ok(guard(new_table, move |self_| {
2873
0
            if !self_.is_empty_singleton() {
2874
0
                // SAFETY:
2875
0
                // 1. We have checked that our table is allocated.
2876
0
                // 2. We know for sure that the `alloc` and `table_layout` matches the
2877
0
                //    [`Allocator`] and [`TableLayout`] used to allocate this table.
2878
0
                unsafe { self_.free_buckets(alloc, table_layout) };
2879
0
            }
2880
0
        }))
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_resize::<_>::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_resize::<hashbrown::raw::inner::alloc::inner::Global>::{closure#0}
2881
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_resize::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::prepare_resize::<hashbrown::raw::inner::alloc::inner::Global>
2882
2883
    /// Reserves or rehashes to make room for `additional` more elements.
2884
    ///
2885
    /// This uses dynamic dispatch to reduce the amount of
2886
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
2887
    ///
2888
    /// # Safety
2889
    ///
2890
    /// If any of the following conditions are violated, the result is
2891
    /// [`undefined behavior`]:
2892
    ///
2893
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2894
    ///   to allocate this table.
2895
    ///
2896
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2897
    ///   used to allocate this table.
2898
    ///
2899
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2900
    ///   the elements stored in the table.
2901
    ///
2902
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2903
    ///
2904
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2905
    #[allow(clippy::inline_always)]
2906
    #[inline(always)]
2907
0
    unsafe fn reserve_rehash_inner<A>(
2908
0
        &mut self,
2909
0
        alloc: &A,
2910
0
        additional: usize,
2911
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
2912
0
        fallibility: Fallibility,
2913
0
        layout: TableLayout,
2914
0
        drop: Option<fn(*mut u8)>,
2915
0
    ) -> Result<(), TryReserveError>
2916
0
    where
2917
0
        A: Allocator,
2918
0
    {
2919
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2920
0
        let new_items = match self.items.checked_add(additional) {
2921
0
            Some(new_items) => new_items,
2922
0
            None => return Err(fallibility.capacity_overflow()),
2923
        };
2924
0
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
2925
0
        if new_items <= full_capacity / 2 {
2926
            // Rehash in-place without re-allocating if we have plenty of spare
2927
            // capacity that is locked up due to DELETED entries.
2928
2929
            // SAFETY:
2930
            // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2931
            //    (since new_items <= full_capacity / 2);
2932
            // 2. The caller ensures that `drop` function is the actual drop function of
2933
            //    the elements stored in the table.
2934
            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2935
            //    used to allocate this table.
2936
            // 4. The caller ensures that the control bytes of the `RawTableInner`
2937
            //    are already initialized.
2938
0
            self.rehash_in_place(hasher, layout.size, drop);
2939
0
            Ok(())
2940
        } else {
2941
            // Otherwise, conservatively resize to at least the next size up
2942
            // to avoid churning deletes into frequent rehashes.
2943
            //
2944
            // SAFETY:
2945
            // 1. We know for sure that `capacity >= self.items`.
2946
            // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2947
            //    [`TableLayout`] that were used to allocate this table.
2948
            // 3. The caller ensures that the control bytes of the `RawTableInner`
2949
            //    are already initialized.
2950
0
            self.resize_inner(
2951
0
                alloc,
2952
0
                usize::max(new_items, full_capacity + 1),
2953
0
                hasher,
2954
0
                fallibility,
2955
0
                layout,
2956
0
            )
2957
        }
2958
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::reserve_rehash_inner::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::reserve_rehash_inner::<hashbrown::raw::inner::alloc::inner::Global>
2959
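
The branch above boils down to a small policy decision. Here is an illustrative, self-contained restatement (the `Plan` enum is invented for the sketch, and `full_capacity` stands for `bucket_mask_to_capacity(self.bucket_mask)`):

#[derive(Debug, PartialEq)]
enum Plan {
    RehashInPlace,
    Resize { at_least: usize },
}

// Rehash in place when at most half the usable capacity would be live;
// otherwise grow to at least the next size up.
fn plan(items: usize, additional: usize, full_capacity: usize) -> Plan {
    let new_items = items.checked_add(additional).expect("capacity overflow");
    if new_items <= full_capacity / 2 {
        Plan::RehashInPlace
    } else {
        Plan::Resize {
            at_least: usize::max(new_items, full_capacity + 1),
        }
    }
}

fn main() {
    // Mostly tombstones, few live items: reclaim space in place.
    assert_eq!(plan(10, 1, 28), Plan::RehashInPlace);
    // Genuinely growing: allocate a larger table.
    assert_eq!(plan(20, 10, 28), Plan::Resize { at_least: 30 });
}
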
2960
    /// Returns an iterator over full buckets indices in the table.
2961
    ///
2962
    /// # Safety
2963
    ///
2964
    /// Behavior is undefined if any of the following conditions are violated:
2965
    ///
2966
    /// * The caller has to ensure that the `RawTableInner` outlives the
2967
    ///   `FullBucketsIndices`. Because we cannot make the `next` method
2968
    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
2969
    ///   `full_buckets_indices` method unsafe.
2970
    ///
2971
    /// * The [`RawTableInner`] must have properly initialized control bytes.
2972
    #[inline(always)]
2973
0
    unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2974
0
        // SAFETY:
2975
0
        // 1. Since the caller of this function ensures that the control bytes
2976
0
        //    are properly initialized and `self.ctrl(0)` points to the start
2977
0
        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
2978
0
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
2979
0
        //    control bytes.
2980
0
        // 2. The value of `items` is equal to the amount of data (values) added
2981
0
        //    to the table.
2982
0
        //
2983
0
        //                         `ctrl` points here (to the start
2984
0
        //                         of the first control byte `CT0`)
2985
0
        //                          ∨
2986
0
        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2987
0
        //                           \________  ________/
2988
0
        //                                    \/
2989
0
        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2990
0
        //
2991
0
        // where: T0...T_n  - our stored data;
2992
0
        //        CT0...CT_n - control bytes or metadata for `data`.
2993
0
        let ctrl = NonNull::new_unchecked(self.ctrl(0));
2994
0
2995
0
        FullBucketsIndices {
2996
0
            // Load the first group
2997
0
            // SAFETY: See explanation above.
2998
0
            current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
2999
0
            group_first_index: 0,
3000
0
            ctrl,
3001
0
            items: self.items,
3002
0
        }
3003
0
    }
3004
3005
    /// Allocates a new table of a different size and moves the contents of the
3006
    /// current table into it.
3007
    ///
3008
    /// This uses dynamic dispatch to reduce the amount of
3009
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
3010
    ///
3011
    /// # Safety
3012
    ///
3013
    /// If any of the following conditions are violated, the result is
3014
    /// [`undefined behavior`]:
3015
    ///
3016
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
3017
    ///   to allocate this table;
3018
    ///
3019
    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
3020
    ///   used to allocate this table;
3021
    ///
3022
    /// * The [`RawTableInner`] must have properly initialized control bytes.
3023
    ///
3024
    /// The caller of this function must ensure that `capacity >= self.items`
3025
    /// otherwise:
3026
    ///
3027
    /// * If `self.items != 0`, calling this function with `capacity == 0`
3028
    ///   results in [`undefined behavior`].
3029
    ///
3030
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
3031
    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
3032
    ///   results in [`undefined behavior`].
3033
    ///
3034
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
3035
    ///   `self.items > capacity_to_buckets(capacity)`, this function
3036
    ///   will never return (it will go into an infinite loop).
3037
    ///
3038
    /// Note: It is recommended (but not required) that the new table's `capacity`
3039
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
3040
    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
3041
    /// more information.
3042
    ///
3043
    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
3044
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3045
    #[allow(clippy::inline_always)]
3046
    #[inline(always)]
3047
0
    unsafe fn resize_inner<A>(
3048
0
        &mut self,
3049
0
        alloc: &A,
3050
0
        capacity: usize,
3051
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
3052
0
        fallibility: Fallibility,
3053
0
        layout: TableLayout,
3054
0
    ) -> Result<(), TryReserveError>
3055
0
    where
3056
0
        A: Allocator,
3057
0
    {
3058
        // SAFETY: We know for sure that `alloc` and `layout` match the [`Allocator`] and [`TableLayout`]
3059
        // that were used to allocate this table.
3060
0
        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
3061
3062
        // SAFETY: We know for sure that RawTableInner will outlive the
3063
        // returned `FullBucketsIndices` iterator, and the caller of this
3064
        // function ensures that the control bytes are properly initialized.
3065
0
        for full_byte_index in self.full_buckets_indices() {
3066
0
            // This may panic.
3067
0
            let hash = hasher(self, full_byte_index);
3068
0
3069
0
            // SAFETY:
3070
0
            // We can use a simpler version of insert() here since:
3071
0
            // 1. There are no DELETED entries.
3072
0
            // 2. We know there is enough space in the table.
3073
0
            // 3. All elements are unique.
3074
0
            // 4. The caller of this function guarantees that `capacity > 0`
3075
0
            //    so `new_table` must already have some allocated memory.
3076
0
            // 5. We set `growth_left` and `items` fields of the new table
3077
0
            //    after the loop.
3078
0
            // 6. We insert into the table, at the returned index, the data
3079
0
            //    matching the given hash immediately after calling this function.
3080
0
            let (new_index, _) = new_table.prepare_insert_slot(hash);
3081
0
3082
0
            // SAFETY:
3083
0
            //
3084
0
            // * `src` is valid for reads of `layout.size` bytes, since the
3085
0
            //   table is alive and the `full_byte_index` is guaranteed to be
3086
0
            //   within bounds (see `FullBucketsIndices::next_impl`);
3087
0
            //
3088
0
            // * `dst` is valid for writes of `layout.size` bytes, since the
3089
0
            //   caller ensures that `table_layout` matches the [`TableLayout`]
3090
0
            //   that was used to allocate the old table and we have the `new_index`
3091
0
            //   returned by `prepare_insert_slot`.
3092
0
            //
3093
0
            // * Both `src` and `dst` are properly aligned.
3094
0
            //
3095
0
            // * Both `src` and `dst` point to different regions of memory.
3096
0
            ptr::copy_nonoverlapping(
3097
0
                self.bucket_ptr(full_byte_index, layout.size),
3098
0
                new_table.bucket_ptr(new_index, layout.size),
3099
0
                layout.size,
3100
0
            );
3101
0
        }
3102
3103
        // The hash function didn't panic, so we can safely set the
3104
        // `growth_left` and `items` fields of the new table.
3105
0
        new_table.growth_left -= self.items;
3106
0
        new_table.items = self.items;
3107
0
3108
0
        // We successfully copied all elements without panicking. Now replace
3109
0
        // self with the new table. The old table will have its memory freed but
3110
0
        // the items will not be dropped (since they have been moved into the
3111
0
        // new table).
3112
0
        // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
3113
0
        // that was used to allocate this table.
3114
0
        mem::swap(self, &mut new_table);
3115
0
3116
0
        Ok(())
3117
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::resize_inner::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::resize_inner::<hashbrown::raw::inner::alloc::inner::Global>
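    // Editorial sketch (not hashbrown code): the resize-by-rehash shape of
    // `resize_inner` above, on a toy linear-probing table. Allocate the new
    // storage, re-derive every element's slot from its hash, move it over,
    // then replace the old storage (the analogue of the final `mem::swap`).
    // `toy_resize` is a hypothetical name; the "hash" is the value itself.
    fn toy_resize(slots: &mut Vec<Option<u64>>, new_len: usize) {
        assert!(new_len.is_power_of_two());
        assert!(slots.iter().flatten().count() <= new_len);
        let mut new_slots = vec![None; new_len];
        for item in slots.iter().flatten() {
            // Probe from the hash-derived index to the first empty slot, as
            // `prepare_insert_slot` does; the fresh table contains no
            // DELETED entries, so plain linear probing suffices.
            let mut i = (*item as usize) & (new_len - 1);
            while new_slots[i].is_some() {
                i = (i + 1) & (new_len - 1);
            }
            new_slots[i] = Some(*item);
        }
        *slots = new_slots;
    }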
3118
3119
    /// Rehashes the contents of the table in place (i.e. without changing the
3120
    /// allocation).
3121
    ///
3122
    /// If `hasher` panics, then some of the table's contents may be lost.
3123
    ///
3124
    /// This uses dynamic dispatch to reduce the amount of
3125
    /// code generated, but it is eliminated by LLVM optimizations when inlined.
3126
    ///
3127
    /// # Safety
3128
    ///
3129
    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
3130
    ///
3131
    /// * The `size_of` must be equal to the size of the elements stored in the table;
3132
    ///
3133
    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
3134
    ///   the elements stored in the table.
3135
    ///
3136
    /// * The [`RawTableInner`] has already been allocated;
3137
    ///
3138
    /// * The [`RawTableInner`] must have properly initialized control bytes.
3139
    ///
3140
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3141
    #[allow(clippy::inline_always)]
3142
    #[cfg_attr(feature = "inline-more", inline(always))]
3143
    #[cfg_attr(not(feature = "inline-more"), inline)]
3144
0
    unsafe fn rehash_in_place(
3145
0
        &mut self,
3146
0
        hasher: &dyn Fn(&mut Self, usize) -> u64,
3147
0
        size_of: usize,
3148
0
        drop: Option<fn(*mut u8)>,
3149
0
    ) {
3150
0
        // If the hash function panics then properly clean up any elements
3151
0
        // that we haven't rehashed yet. We unfortunately can't preserve the
3152
0
        // elements since we lost their hashes and have no way of recovering them
3153
0
        // without risking another panic.
3154
0
        self.prepare_rehash_in_place();
3155
0
3156
0
        let mut guard = guard(self, move |self_| {
3157
0
            if let Some(drop) = drop {
3158
0
                for i in 0..self_.buckets() {
3159
0
                    if *self_.ctrl(i) == DELETED {
3160
0
                        self_.set_ctrl(i, EMPTY);
3161
0
                        drop(self_.bucket_ptr(i, size_of));
3162
0
                        self_.items -= 1;
3163
0
                    }
3164
                }
3165
0
            }
3166
0
            self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
3167
0
        });
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::rehash_in_place::{closure#0}
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::rehash_in_place::{closure#0}
3168
3169
        // At this point, DELETED elements are elements that we haven't
3170
        // rehashed yet. Find them and re-insert them at their ideal
3171
        // position.
3172
0
        'outer: for i in 0..guard.buckets() {
3173
0
            if *guard.ctrl(i) != DELETED {
3174
0
                continue;
3175
0
            }
3176
0
3177
0
            let i_p = guard.bucket_ptr(i, size_of);
3178
3179
0
            'inner: loop {
3180
0
                // Hash the current item
3181
0
                let hash = hasher(*guard, i);
3182
0
3183
0
                // Search for a suitable place to put it
3184
0
                //
3185
0
                // SAFETY: Caller of this function ensures that the control bytes
3186
0
                // are properly initialized.
3187
0
                let new_i = guard.find_insert_slot(hash).index;
3188
0
3189
0
                // Probing works by scanning through all of the control
3190
0
                // bytes in groups, which may not be aligned to the group
3191
0
                // size. If both the new and old position fall within the
3192
0
                // same unaligned group, then there is no benefit in moving
3193
0
                // it and we can just continue to the next item.
3194
0
                if likely(guard.is_in_same_group(i, new_i, hash)) {
3195
0
                    guard.set_ctrl_h2(i, hash);
3196
0
                    continue 'outer;
3197
0
                }
3198
0
3199
0
                let new_i_p = guard.bucket_ptr(new_i, size_of);
3200
0
3201
0
                // We are moving the current item to a new position. Write
3202
0
                // our H2 to the control byte of the new position.
3203
0
                let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
3204
0
                if prev_ctrl == EMPTY {
3205
0
                    guard.set_ctrl(i, EMPTY);
3206
0
                    // If the target slot is empty, simply move the current
3207
0
                    // element into the new slot and clear the old control
3208
0
                    // byte.
3209
0
                    ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
3210
0
                    continue 'outer;
3211
                } else {
3212
                    // If the target slot is occupied, swap the two elements
3213
                    // and then continue processing the element that we just
3214
                    // swapped into the old slot.
3215
0
                    debug_assert_eq!(prev_ctrl, DELETED);
3216
0
                    ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
3217
0
                    continue 'inner;
3218
                }
3219
            }
3220
        }
3221
3222
0
        guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
3223
0
3224
0
        mem::forget(guard);
3225
0
    }
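    // Editorial sketch (not hashbrown code): the same tombstone-driven
    // in-place rehash on a toy linear-probing table. `pending[i]` plays the
    // role of a DELETED control byte, `None` plays EMPTY, and the value
    // itself stands in for the hash. `toy_rehash_in_place` is a hypothetical
    // name.
    fn toy_rehash_in_place(slots: &mut [Option<u64>]) {
        let len = slots.len();
        let mut pending: Vec<bool> = slots.iter().map(Option::is_some).collect();
        'outer: for i in 0..len {
            while pending[i] {
                let v = slots[i].unwrap();
                // Probe from the ideal slot, skipping already-processed full
                // slots, just as `find_insert_slot` skips FULL bytes.
                let mut j = (v as usize) % len;
                while j != i && slots[j].is_some() && !pending[j] {
                    j = (j + 1) % len;
                }
                pending[i] = false;
                if j == i {
                    // The first acceptable slot is the current one; keep it
                    // (the role of the `is_in_same_group` fast path above).
                    continue 'outer;
                }
                if pending[j] {
                    // Destination holds an unprocessed element: swap, then
                    // keep rehashing whatever landed in slot `i` (the role
                    // of the inner loop above).
                    slots.swap(i, j);
                    pending[j] = false;
                    pending[i] = true;
                } else {
                    // Destination is empty: plain move.
                    slots[j] = slots[i].take();
                }
            }
        }
    }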
3226
3227
    /// Deallocates the table without dropping any entries.
3228
    ///
3229
    /// # Note
3230
    ///
3231
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
3232
    /// otherwise it can leak memory. Calling this function also invalidates
3233
    /// (leaves dangling) all instances of buckets ([`Bucket`]) as well as
3234
    /// the `ctrl` field of the table.
3235
    ///
3236
    /// # Safety
3237
    ///
3238
    /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
3239
    ///
3240
    /// * The [`RawTableInner`] has already been allocated;
3241
    ///
3242
    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
3243
    ///   to allocate this table.
3244
    ///
3245
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
3246
    ///   to allocate this table.
3247
    ///
3248
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3249
    ///
3250
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3251
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3252
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3253
    #[inline]
3254
0
    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
3255
0
    where
3256
0
        A: Allocator,
3257
0
    {
3258
0
        // SAFETY: The caller must uphold the safety contract for `free_buckets`
3259
0
        // method.
3260
0
        let (ptr, layout) = self.allocation_info(table_layout);
3261
0
        alloc.deallocate(ptr, layout);
3262
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::free_buckets::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::free_buckets::<hashbrown::raw::inner::alloc::inner::Global>
3263
3264
    /// Returns a pointer to the allocated memory and the layout that was used to
3265
    /// allocate the table.
3266
    ///
3267
    /// # Safety
3268
    ///
3269
    /// The caller of this function must observe the following safety rules:
3270
    ///
3271
    /// * The [`RawTableInner`] has already been allocated, otherwise
3272
    ///   calling this function results in [`undefined behavior`]
3273
    ///
3274
    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3275
    ///   that was used to allocate this table. Failure to comply with this condition
3276
    ///   may result in [`undefined behavior`].
3277
    ///
3278
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3279
    ///
3280
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3281
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3282
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3283
    #[inline]
3284
0
    unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3285
0
        debug_assert!(
3286
0
            !self.is_empty_singleton(),
3287
0
            "this function can only be called on non-empty tables"
3288
        );
3289
3290
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3291
0
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3292
0
            Some(lco) => lco,
3293
0
            None => unsafe { hint::unreachable_unchecked() },
3294
        };
3295
0
        (
3296
0
            // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3297
0
            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3298
0
            layout,
3299
0
        )
3300
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::allocation_info
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::allocation_info
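    // Editorial sketch (assumed, simplified): how a helper in the spirit of
    // `TableLayout::calculate_layout_for` can compute the combined allocation
    // and the `ctrl_offset` used above to step from the interior `ctrl`
    // pointer back to the allocation start. `toy_table_layout` is a
    // hypothetical name.
    fn toy_table_layout(
        elem: core::alloc::Layout,
        buckets: usize,
        group_width: usize,
    ) -> Option<(core::alloc::Layout, usize)> {
        // Data array for `buckets` elements, followed by
        // `buckets + group_width` control bytes aligned to the group width.
        let data = core::alloc::Layout::from_size_align(
            elem.size().checked_mul(buckets)?,
            elem.align(),
        )
        .ok()?;
        let ctrl =
            core::alloc::Layout::from_size_align(buckets + group_width, group_width).ok()?;
        // `extend` places `ctrl` after `data` and returns where it starts,
        // so `allocation_start == ctrl_ptr - ctrl_offset`.
        let (layout, ctrl_offset) = data.extend(ctrl).ok()?;
        Some((layout, ctrl_offset))
    }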
3301
3302
    /// Returns a pointer to the allocated memory and the layout that was used to
3303
    /// allocate the table. If [`RawTableInner`] has not been allocated, this
3304
    /// function returns a dangling pointer and a `()` (unit) layout.
3305
    ///
3306
    /// # Safety
3307
    ///
3308
    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3309
    /// that was used to allocate this table. Failure to comply with this condition
3310
    /// may result in [`undefined behavior`].
3311
    ///
3312
    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3313
    ///
3314
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3315
    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3316
    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3317
    #[cfg(feature = "raw")]
3318
0
    unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3319
0
        if self.is_empty_singleton() {
3320
0
            (NonNull::dangling(), Layout::new::<()>())
3321
        } else {
3322
            // SAFETY:
3323
            // 1. We have checked that our table is allocated.
3324
            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3325
            // that was used to allocate this table.
3326
0
            unsafe { self.allocation_info(table_layout) }
3327
        }
3328
0
    }
3329
3330
    /// Marks all table buckets as empty without dropping their contents.
3331
    #[inline]
3332
0
    fn clear_no_drop(&mut self) {
3333
0
        if !self.is_empty_singleton() {
3334
0
            unsafe {
3335
0
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
3336
0
            }
3337
0
        }
3338
0
        self.items = 0;
3339
0
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3340
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::clear_no_drop
Unexecuted instantiation: <hashbrown::raw::inner::RawTableInner>::clear_no_drop
3341
3342
    /// Erases the [`Bucket`]'s control byte at the given index so that it is no
3343
    /// longer flagged as full, decreases the table's `items` count and, if possible,
3344
    /// increases `self.growth_left`.
3345
    ///
3346
    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3347
    /// does not make any changes to the `data` parts of the table. The caller of this
3348
    /// function must take care to properly drop the `data`, otherwise calling this
3349
    /// function may result in a memory leak.
3350
    ///
3351
    /// # Safety
3352
    ///
3353
    /// You must observe the following safety rules when calling this function:
3354
    ///
3355
    /// * The [`RawTableInner`] has already been allocated;
3356
    ///
3357
    /// * The control byte at the given position must be full;
3358
    ///
3359
    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3360
    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3361
    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
3362
    ///
3363
    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3364
    ///
3365
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
3366
    /// functions is likely to result in [`undefined behavior`] due to subtraction
3367
    /// overflow (`self.items -= 1` overflows when `self.items == 0`).
3368
    ///
3369
    /// See also the [`Bucket::as_ptr`] method for more information about properly
3370
    /// removing or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
3371
    ///
3372
    /// [`RawTableInner::buckets`]: RawTableInner::buckets
3373
    /// [`Bucket::as_ptr`]: Bucket::as_ptr
3374
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3375
    #[inline]
3376
0
    unsafe fn erase(&mut self, index: usize) {
3377
0
        debug_assert!(self.is_bucket_full(index));
3378
3379
        // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3380
        // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3381
0
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3382
0
        // SAFETY:
3383
0
        // - The caller must uphold the safety contract for `erase` method;
3384
0
        // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3385
0
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3386
0
        let empty_after = Group::load(self.ctrl(index)).match_empty();
3387
3388
        // Inserting and searching in the map is performed by two key functions:
3389
        //
3390
        // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
3391
        //   slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
3392
        //   slot immediately in the first group, it jumps to the next `Group` looking for it,
3393
        //   and so on until it has gone through all the groups in the control bytes.
3394
        //
3395
        // - The `find_inner` function that looks for the index of the desired element by looking
3396
        //   at all the `FULL` bytes in the group. If it did not find the element right away, and
3397
        //   there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
3398
        //   function may have found a suitable slot in the next group. Therefore, `find_inner`
3399
        //   jumps further, and if it does not find the desired element and again there is no `EMPTY`
3400
        //   byte, then it jumps further, and so on. The search stops only if `find_inner` function
3401
        //   finds the desired element or hits an `EMPTY` slot/byte.
3402
        //
3403
        // Accordingly, this leads to two consequences:
3404
        //
3405
        // - The map must have `EMPTY` slots (bytes);
3406
        //
3407
        // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
3408
        //   function may stumble upon an `EMPTY` byte before finding the desired element and stop
3409
        //   searching.
3410
        //
3411
        // Thus it is necessary to check all bytes after and before the erased element. If we are in
3412
        // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
3413
        // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3414
        // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3415
        // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3416
        // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
3417
        //
3418
        // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
3419
        // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
3420
        // cannot have `DELETED` bytes.
3421
        //
3422
        // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3423
        // `trailing_zeros` refers to the bytes at the beginning of a group.
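        //
        // Worked example (illustrative, assuming Group::WIDTH == 8): if the
        // group before the slot has no EMPTY bytes (`leading_zeros() == 8`)
        // and the group starting at the slot has its only EMPTY byte in the
        // last position (`trailing_zeros() == 7`), then `8 + 7 >= 8`: some
        // width-8 probe window covering this slot contains no EMPTY byte, so
        // the slot must become DELETED. Conversely, when the two counts sum
        // to less than 8, every such window contains an EMPTY byte, probing
        // already stops nearby, and the slot can safely become EMPTY.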
3424
0
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3425
0
            DELETED
3426
        } else {
3427
0
            self.growth_left += 1;
3428
0
            EMPTY
3429
        };
3430
        // SAFETY: the caller must uphold the safety contract for `erase` method.
3431
0
        self.set_ctrl(index, ctrl);
3432
0
        self.items -= 1;
3433
0
    }
3434
}
3435
3436
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3437
0
    fn clone(&self) -> Self {
3438
0
        if self.table.is_empty_singleton() {
3439
0
            Self::new_in(self.alloc.clone())
3440
        } else {
3441
            unsafe {
3442
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3443
                //
3444
                // SAFETY: This is safe as we are taking the size of an already allocated table
3445
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
3446
                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3447
0
                let mut new_table = match Self::new_uninitialized(
3448
0
                    self.alloc.clone(),
3449
0
                    self.table.buckets(),
3450
0
                    Fallibility::Infallible,
3451
0
                ) {
3452
0
                    Ok(table) => table,
3453
0
                    Err(_) => hint::unreachable_unchecked(),
3454
                };
3455
3456
                // Cloning elements may fail (the clone function may panic). But we don't
3457
                // need to worry about uninitialized control bits, since:
3458
                // 1. The number of items (elements) in the table is zero, which means that
3459
                //    the control bits will not be read by the `Drop` function.
3460
                // 2. The `clone_from_spec` method will first copy all control bits from
3461
                //    `self` (thus initializing them). But this will not affect the `Drop`
3462
                //    function, since the `clone_from_spec` function sets `items` only after
3463
                //    successfully cloning all elements.
3464
0
                new_table.clone_from_spec(self);
3465
0
                new_table
3466
            }
3467
        }
3468
0
    }
3469
3470
0
    fn clone_from(&mut self, source: &Self) {
3471
0
        if source.table.is_empty_singleton() {
3472
0
            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3473
0
            unsafe {
3474
0
                // SAFETY:
3475
0
                // 1. We call the function only once;
3476
0
                // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
3477
0
                //    and [`TableLayout`] that were used to allocate this table.
3478
0
                // 3. If any elements' drop function panics, then there will only be a memory leak,
3479
0
                //    because we have replaced the inner table with a new one.
3480
0
                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3481
0
            }
3482
        } else {
3483
            unsafe {
3484
                // Make sure that if any panic occurs, we clear the table and
3485
                // leave it in an empty state.
3486
0
                let mut self_ = guard(self, |self_| {
3487
0
                    self_.clear_no_drop();
3488
0
                });
3489
0
3490
0
                // First, drop all our elements without clearing the control
3491
0
                // bytes. If this panics then the scope guard will clear the
3492
0
                // table, leaking any elements that were not dropped yet.
3493
0
                //
3494
0
                // This leak is unavoidable: we can't try dropping more elements
3495
0
                // since this could lead to another panic and abort the process.
3496
0
                //
3497
0
                // SAFETY: If something goes wrong we clear our table right after
3498
0
                // dropping the elements, so there is no double drop, since `items`
3499
0
                // will be equal to zero.
3500
0
                self_.table.drop_elements::<T>();
3501
0
3502
0
                // If necessary, resize our table to match the source.
3503
0
                if self_.buckets() != source.buckets() {
3504
0
                    let new_inner = match RawTableInner::new_uninitialized(
3505
0
                        &self_.alloc,
3506
0
                        Self::TABLE_LAYOUT,
3507
0
                        source.buckets(),
3508
0
                        Fallibility::Infallible,
3509
0
                    ) {
3510
0
                        Ok(table) => table,
3511
0
                        Err(_) => hint::unreachable_unchecked(),
3512
                    };
3513
                    // Replace the old inner with the new uninitialized one. This is fine: if
3514
                    // something goes wrong, the `ScopeGuard` will initialize all control bytes and leave an empty table.
3515
0
                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
3516
0
                    if !old_inner.is_empty_singleton() {
3517
0
                        // SAFETY:
3518
0
                        // 1. We have checked that our table is allocated.
3519
0
                        // 2. We know for sure that `alloc` and `table_layout` match
3520
0
                        // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3521
0
                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3522
0
                    }
3523
0
                }
3524
3525
                // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3526
                // inside the `clone_from_impl` function will take care of that, dropping all
3527
                // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3528
0
                self_.clone_from_spec(source);
3529
0
3530
0
                // Disarm the scope guard if cloning was successful.
3531
0
                ScopeGuard::into_inner(self_);
3532
            }
3533
        }
3534
0
    }
3535
}
3536
3537
/// Specialization of `clone_from` for `Copy` types
3538
trait RawTableClone {
3539
    unsafe fn clone_from_spec(&mut self, source: &Self);
3540
}
3541
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3542
    default_fn! {
3543
        #[cfg_attr(feature = "inline-more", inline)]
3544
0
        unsafe fn clone_from_spec(&mut self, source: &Self) {
3545
0
            self.clone_from_impl(source);
3546
0
        }
3547
    }
3548
}
3549
#[cfg(feature = "nightly")]
3550
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3551
    #[cfg_attr(feature = "inline-more", inline)]
3552
    unsafe fn clone_from_spec(&mut self, source: &Self) {
3553
        source
3554
            .table
3555
            .ctrl(0)
3556
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3557
        source
3558
            .data_start()
3559
            .as_ptr()
3560
            .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3561
3562
        self.table.items = source.table.items;
3563
        self.table.growth_left = source.table.growth_left;
3564
    }
3565
}
3566
3567
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3568
    /// Common code for clone and clone_from. Assumes:
3569
    /// - `self.buckets() == source.buckets()`.
3570
    /// - Any existing elements have been dropped.
3571
    /// - The control bytes are not initialized yet.
3572
    #[cfg_attr(feature = "inline-more", inline)]
3573
0
    unsafe fn clone_from_impl(&mut self, source: &Self) {
3574
0
        // Copy the control bytes unchanged. We do this in a single pass
3575
0
        source
3576
0
            .table
3577
0
            .ctrl(0)
3578
0
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3579
0
3580
0
        // The cloning of elements may panic, in which case we need
3581
0
        // to make sure we drop only the elements that have been
3582
0
        // cloned so far.
3583
0
        let mut guard = guard((0, &mut *self), |(index, self_)| {
3584
0
            if T::NEEDS_DROP {
3585
0
                for i in 0..*index {
3586
0
                    if self_.is_bucket_full(i) {
3587
0
                        self_.bucket(i).drop();
3588
0
                    }
3589
                }
3590
0
            }
3591
0
        });
3592
3593
0
        for from in source.iter() {
3594
0
            let index = source.bucket_index(&from);
3595
0
            let to = guard.1.bucket(index);
3596
0
            to.write(from.as_ref().clone());
3597
0
3598
0
            // Update the index in case we need to unwind.
3599
0
            guard.0 = index + 1;
3600
0
        }
3601
3602
        // Successfully cloned all items, no need to clean up.
3603
0
        mem::forget(guard);
3604
0
3605
0
        self.table.items = source.table.items;
3606
0
        self.table.growth_left = source.table.growth_left;
3607
0
    }
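// Editorial sketch (not hashbrown code): the same "drop only the cloned
// prefix, then disarm the guard" pattern that `clone_from_impl` builds with
// `guard` above, shown on a plain slice. `PrefixGuard` and `clone_into` are
// hypothetical names.
struct PrefixGuard<'a, T> {
    buf: &'a mut [core::mem::MaybeUninit<T>],
    init: usize,
}

impl<T> Drop for PrefixGuard<'_, T> {
    fn drop(&mut self) {
        // Runs only if `clone_into` unwinds: drop exactly the slots that
        // were initialized so far.
        for slot in &mut self.buf[..self.init] {
            unsafe { slot.assume_init_drop() };
        }
    }
}

fn clone_into<T: Clone>(src: &[T], dst: &mut [core::mem::MaybeUninit<T>]) {
    assert!(src.len() <= dst.len());
    let mut guard = PrefixGuard { buf: dst, init: 0 };
    for (i, item) in src.iter().enumerate() {
        guard.buf[i].write(item.clone()); // `clone` may panic
        guard.init = i + 1;
    }
    // Success: disarm the guard so the caller owns the initialized values.
    core::mem::forget(guard);
}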
3608
3609
    /// Variant of `clone_from` to use when a hasher is available.
3610
    #[cfg(feature = "raw")]
3611
0
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
3612
0
        // If we have enough capacity in the table, just clear it and insert
3613
0
        // elements one by one. We don't do this if we have the same number of
3614
0
        // buckets as the source since we can just copy the contents directly
3615
0
        // in that case.
3616
0
        if self.table.buckets() != source.table.buckets()
3617
0
            && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
3618
        {
3619
0
            self.clear();
3620
0
3621
0
            let mut guard_self = guard(&mut *self, |self_| {
3622
0
                // Clear the partially copied table if a panic occurs, otherwise
3623
0
                // items and growth_left will be out of sync with the contents
3624
0
                // of the table.
3625
0
                self_.clear();
3626
0
            });
3627
3628
            unsafe {
3629
0
                for item in source.iter() {
3630
0
                    // This may panic.
3631
0
                    let item = item.as_ref().clone();
3632
0
                    let hash = hasher(&item);
3633
0
3634
0
                    // We can use a simpler version of insert() here since:
3635
0
                    // - there are no DELETED entries.
3636
0
                    // - we know there is enough space in the table.
3637
0
                    // - all elements are unique.
3638
0
                    let (index, _) = guard_self.table.prepare_insert_slot(hash);
3639
0
                    guard_self.bucket(index).write(item);
3640
0
                }
3641
            }
3642
3643
            // Successfully cloned all items, no need to clean up.
3644
0
            mem::forget(guard_self);
3645
0
3646
0
            self.table.items = source.table.items;
3647
0
            self.table.growth_left -= source.table.items;
3648
0
        } else {
3649
0
            self.clone_from(source);
3650
0
        }
3651
0
    }
3652
}
3653
3654
impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3655
    #[inline]
3656
0
    fn default() -> Self {
3657
0
        Self::new_in(Default::default())
3658
0
    }
3659
}
3660
3661
#[cfg(feature = "nightly")]
3662
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3663
    #[cfg_attr(feature = "inline-more", inline)]
3664
    fn drop(&mut self) {
3665
        unsafe {
3666
            // SAFETY:
3667
            // 1. We call the function only once;
3668
            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
3669
            //    and [`TableLayout`] that were used to allocate this table.
3670
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3671
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3672
            //    so there won't be any table left in an inconsistent state.
3673
            self.table
3674
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3675
        }
3676
    }
3677
}
3678
#[cfg(not(feature = "nightly"))]
3679
impl<T, A: Allocator> Drop for RawTable<T, A> {
3680
    #[cfg_attr(feature = "inline-more", inline)]
3681
0
    fn drop(&mut self) {
3682
0
        unsafe {
3683
0
            // SAFETY:
3684
0
            // 1. We call the function only once;
3685
0
            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
3686
0
            //    and [`TableLayout`] that were used to allocate this table.
3687
0
            // 3. If the drop function of any elements fails, then only a memory leak will occur,
3688
0
            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
3689
0
            //    so there won't be any table left in an inconsistent state.
3690
0
            self.table
3691
0
                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3692
0
        }
3693
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<_, _> as core::ops::drop::Drop>::drop
Unexecuted instantiation: <hashbrown::raw::inner::RawTable<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)> as core::ops::drop::Drop>::drop
3694
}
3695
3696
impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3697
    type Item = T;
3698
    type IntoIter = RawIntoIter<T, A>;
3699
3700
    #[cfg_attr(feature = "inline-more", inline)]
3701
0
    fn into_iter(self) -> RawIntoIter<T, A> {
3702
0
        unsafe {
3703
0
            let iter = self.iter();
3704
0
            self.into_iter_from(iter)
3705
0
        }
3706
0
    }
3707
}
3708
3709
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3710
/// not track an item count.
3711
pub(crate) struct RawIterRange<T> {
3712
    // Mask of full buckets in the current group. Bits are cleared from this
3713
    // mask as each element is processed.
3714
    current_group: BitMaskIter,
3715
3716
    // Pointer to the buckets for the current group.
3717
    data: Bucket<T>,
3718
3719
    // Pointer to the next group of control bytes;
3720
    // Must be aligned to the group size.
3721
    next_ctrl: *const u8,
3722
3723
    // Pointer one past the last control byte of this range.
3724
    end: *const u8,
3725
}
3726
3727
impl<T> RawIterRange<T> {
3728
    /// Returns a `RawIterRange` covering a subset of a table.
3729
    ///
3730
    /// # Safety
3731
    ///
3732
    /// If any of the following conditions are violated, the result is
3733
    /// [`undefined behavior`]:
3734
    ///
3735
    /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3736
    ///
3737
    /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
3738
    ///
3739
    /// * `ctrl` must point to the array of properly initialized control bytes;
3740
    ///
3741
    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3742
    ///
3743
    /// * the value of `len` must be less than or equal to the number of table buckets,
3744
    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3745
    ///   must be positive.
3746
    ///
3747
    /// * The `ctrl.add(len)` pointer must be either in bounds or one
3748
    ///   byte past the end of the same [allocated table].
3749
    ///
3750
    /// * The `len` must be a power of two.
3751
    ///
3752
    /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3753
    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3754
    #[cfg_attr(feature = "inline-more", inline)]
3755
0
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3756
0
        debug_assert_ne!(len, 0);
3757
0
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3758
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3759
0
        let end = ctrl.add(len);
3760
0
3761
0
        // Load the first group and advance ctrl to point to the next group
3762
0
        // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3763
0
        let current_group = Group::load_aligned(ctrl).match_full();
3764
0
        let next_ctrl = ctrl.add(Group::WIDTH);
3765
0
3766
0
        Self {
3767
0
            current_group: current_group.into_iter(),
3768
0
            data,
3769
0
            next_ctrl,
3770
0
            end,
3771
0
        }
3772
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<_>>::new
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::new
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<(gix_hash::object_id::ObjectId, u32)>>::new
3773
3774
    /// Splits a `RawIterRange` into two halves.
3775
    ///
3776
    /// Returns `None` if the remaining range is smaller than or equal to the
3777
    /// group width.
3778
    #[cfg_attr(feature = "inline-more", inline)]
3779
    #[cfg(feature = "rayon")]
3780
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3781
        unsafe {
3782
            if self.end <= self.next_ctrl {
3783
                // Nothing to split if the group that we are current processing
3784
                // is the last one.
3785
                (self, None)
3786
            } else {
3787
                // len is the remaining number of elements after the group that
3788
                // we are currently processing. It must be a multiple of the
3789
                // group size (small tables are caught by the check above).
3790
                let len = offset_from(self.end, self.next_ctrl);
3791
                debug_assert_eq!(len % Group::WIDTH, 0);
3792
3793
                // Split the remaining elements into two halves, but round the
3794
                // midpoint down in case there is an odd number of groups
3795
                // remaining. This ensures that:
3796
                // - The tail is at least 1 group long.
3797
                // - The split is roughly even considering we still have the
3798
                //   current group to process.
3799
                let mid = (len / 2) & !(Group::WIDTH - 1);
3800
3801
                let tail = Self::new(
3802
                    self.next_ctrl.add(mid),
3803
                    self.data.next_n(Group::WIDTH).next_n(mid),
3804
                    len - mid,
3805
                );
3806
                debug_assert_eq!(
3807
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3808
                    tail.data.ptr
3809
                );
3810
                debug_assert_eq!(self.end, tail.end);
3811
                self.end = self.next_ctrl.add(mid);
3812
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3813
                (self, Some(tail))
3814
            }
3815
        }
3816
    }
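    // Worked example (illustrative): with `Group::WIDTH == 8` and `len == 40`
    // control bytes remaining, `mid = (40 / 2) & !(8 - 1) = 16`, so the head
    // keeps 16 bytes beyond its current group and the tail takes the other
    // 24; both halves stay aligned to the group width.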
3817
3818
    /// # Safety
3819
    /// If DO_CHECK_PTR_RANGE is false, the caller must ensure that we never try to iterate
3820
    /// after yielding all elements.
3821
    #[cfg_attr(feature = "inline-more", inline)]
3822
0
    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3823
        loop {
3824
0
            if let Some(index) = self.current_group.next() {
3825
0
                return Some(self.data.next_n(index));
3826
0
            }
3827
0
3828
0
            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3829
0
                return None;
3830
0
            }
3831
0
3832
0
            // We might read past self.end up to the next group boundary,
3833
0
            // but this is fine because it only occurs on tables smaller
3834
0
            // than the group size where the trailing control bytes are all
3835
0
            // EMPTY. On larger tables self.end is guaranteed to be aligned
3836
0
            // to the group size (since tables are power-of-two sized).
3837
0
            self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3838
0
            self.data = self.data.next_n(Group::WIDTH);
3839
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3840
        }
3841
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<_>>::next_impl::<_>
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)>>::next_impl::<false>
Unexecuted instantiation: <hashbrown::raw::inner::RawIterRange<(gix_hash::object_id::ObjectId, u32)>>::next_impl::<false>
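    // Editorial sketch (not hashbrown code): the same group-at-a-time scan
    // that `next_impl` performs, written against a plain byte slice with a
    // `u8` mask standing in for the SIMD-backed `Group`/`BitMask` pair. FULL
    // control bytes have the high bit clear. `full_indices` is a
    // hypothetical name.
    fn full_indices(ctrl: &[u8]) -> Vec<usize> {
        const WIDTH: usize = 8; // stand-in for Group::WIDTH
        let mut out = Vec::new();
        for (g, group) in ctrl.chunks(WIDTH).enumerate() {
            // Analogue of `Group::load(..).match_full()`: one bit per FULL byte.
            let mut mask: u8 = 0;
            for (i, &byte) in group.iter().enumerate() {
                if byte & 0x80 == 0 {
                    mask |= 1 << i;
                }
            }
            // Analogue of `BitMaskIter`: pop set bits, lowest index first.
            while mask != 0 {
                let bit = mask.trailing_zeros() as usize;
                mask &= mask - 1;
                out.push(g * WIDTH + bit);
            }
        }
        out
    }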
3842
3843
    /// Folds every element into an accumulator by applying an operation,
3844
    /// returning the final result.
3845
    ///
3846
    /// `fold_impl()` takes three arguments: the number of items remaining in
3847
    /// the iterator, an initial value, and a closure with two arguments: an
3848
    /// 'accumulator', and an element. The closure returns the value that the
3849
    /// accumulator should have for the next iteration.
3850
    ///
3851
    /// The initial value is the value the accumulator will have on the first call.
3852
    ///
3853
    /// After applying this closure to every element of the iterator, `fold_impl()`
3854
    /// returns the accumulator.
3855
    ///
3856
    /// # Safety
3857
    ///
3858
    /// If any of the following conditions are violated, the result is
3859
    /// [`Undefined Behavior`]:
3860
    ///
3861
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3862
    ///   i.e. table outlives the `RawIterRange`;
3863
    ///
3864
    /// * The provided `n` value must match the actual number of items
3865
    ///   in the table.
3866
    ///
3867
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3868
    #[allow(clippy::while_let_on_iterator)]
3869
    #[cfg_attr(feature = "inline-more", inline)]
3870
0
    unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3871
0
    where
3872
0
        F: FnMut(B, Bucket<T>) -> B,
3873
0
    {
3874
        loop {
3875
0
            while let Some(index) = self.current_group.next() {
3876
                // The returned `index` will always be in the range `0..Group::WIDTH`,
3877
                // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3878
0
                debug_assert!(n != 0);
3879
0
                let bucket = self.data.next_n(index);
3880
0
                acc = f(acc, bucket);
3881
0
                n -= 1;
3882
            }
3883
3884
0
            if n == 0 {
3885
0
                return acc;
3886
0
            }
3887
0
3888
0
            // SAFETY: The caller of this function ensures that:
3889
0
            //
3890
0
            // 1. The provided `n` value matches the actual number of items in the table;
3891
0
            // 2. The table is alive and has not been moved.
3892
0
            //
3893
0
            // Taking the above into account, we always stay within the bounds, because:
3894
0
            //
3895
0
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3896
0
            //    we will never end up in the given branch, since we should have already
3897
0
            //    yielded all the elements of the table.
3898
0
            //
3899
0
            // 2. For tables larger than the group width. The number of buckets is a
3900
0
            //    power of two (2 ^ n), Group::WIDTH is also a power of two (2 ^ k). Since
3901
0
            //    `(2 ^ n) > (2 ^ k)`, we have `(2 ^ n) % (2 ^ k) = 0`. As we start from the
3902
0
            //    start of the array of control bytes, and never try to iterate after
3903
0
            //    getting all the elements, the last `self.current_group` will read bytes
3904
0
            //    from the `self.buckets() - Group::WIDTH` index. We also know that
3905
0
            //    `self.current_group.next()` will always return indices within the range
3906
0
            //    `0..Group::WIDTH`.
3907
0
            //
3908
0
            //    Knowing all of the above and taking into account that we are synchronizing
3909
0
            //    the `self.data` index with the index we used to read the `self.current_group`,
3910
0
            //    the subsequent `self.data.next_n(index)` will always return a bucket with
3911
0
            //    an index number less than `self.buckets()`.
3912
0
            //
3913
0
            //    The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3914
0
            //    actually be read, since we should have already yielded all the elements of
3915
0
            //    the table.
3916
0
            self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3917
0
            self.data = self.data.next_n(Group::WIDTH);
3918
0
            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3919
        }
3920
0
    }
3921
}
3922
3923
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3924
// in the actual iterator implementations determine the real Send/Sync bounds.
3925
unsafe impl<T> Send for RawIterRange<T> {}
3926
unsafe impl<T> Sync for RawIterRange<T> {}
3927
3928
impl<T> Clone for RawIterRange<T> {
3929
    #[cfg_attr(feature = "inline-more", inline)]
3930
0
    fn clone(&self) -> Self {
3931
0
        Self {
3932
0
            data: self.data.clone(),
3933
0
            next_ctrl: self.next_ctrl,
3934
0
            current_group: self.current_group,
3935
0
            end: self.end,
3936
0
        }
3937
0
    }
3938
}
3939
3940
impl<T> Iterator for RawIterRange<T> {
3941
    type Item = Bucket<T>;
3942
3943
    #[cfg_attr(feature = "inline-more", inline)]
3944
0
    fn next(&mut self) -> Option<Bucket<T>> {
3945
0
        unsafe {
3946
0
            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true.
3947
0
            self.next_impl::<true>()
3948
0
        }
3949
0
    }
3950
3951
    #[inline]
3952
0
    fn size_hint(&self) -> (usize, Option<usize>) {
3953
        // We don't have an item count, so just guess based on the range size.
3954
0
        let remaining_buckets = if self.end > self.next_ctrl {
3955
0
            unsafe { offset_from(self.end, self.next_ctrl) }
3956
        } else {
3957
0
            0
3958
        };
3959
3960
        // Add a group width to include the group we are currently processing.
3961
0
        (0, Some(Group::WIDTH + remaining_buckets))
3962
0
    }
3963
}
3964
3965
impl<T> FusedIterator for RawIterRange<T> {}
3966
3967
/// Iterator which returns a raw pointer to every full bucket in the table.
3968
///
3969
/// For maximum flexibility this iterator is not bound by a lifetime, but you
3970
/// must observe several rules when using it:
3971
/// - You must not free the hash table while iterating (including via growing/shrinking).
3972
/// - It is fine to erase a bucket that has been yielded by the iterator.
3973
/// - Erasing a bucket that has not yet been yielded by the iterator may still
3974
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
3975
/// - It is unspecified whether an element inserted after the iterator was
3976
///   created will be yielded by that iterator (unless `reflect_insert` is called).
3977
/// - The order in which the iterator yields buckets is unspecified and may
3978
///   change in the future.
3979
pub struct RawIter<T> {
3980
    pub(crate) iter: RawIterRange<T>,
3981
    items: usize,
3982
}
3983
3984
impl<T> RawIter<T> {
3985
    /// Refresh the iterator so that it reflects a removal from the given bucket.
3986
    ///
3987
    /// For the iterator to remain valid, this method must be called once
3988
    /// for each removed bucket before `next` is called again.
3989
    ///
3990
    /// This method should be called _before_ the removal is made. It is not necessary to call this
3991
    /// method if you are removing an item that this iterator yielded in the past.
3992
    #[cfg(feature = "raw")]
3993
0
    pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
3994
0
        self.reflect_toggle_full(b, false);
3995
0
    }
3996
3997
    /// Refresh the iterator so that it reflects an insertion into the given bucket.
3998
    ///
3999
    /// For the iterator to remain valid, this method must be called once
4000
    /// for each insert before `next` is called again.
4001
    ///
4002
    /// This method does not guarantee that an insertion of a bucket with a greater
4003
    /// index than the last one yielded will be reflected in the iterator.
4004
    ///
4005
    /// This method should be called _after_ the given insert is made.
4006
    #[cfg(feature = "raw")]
4007
0
    pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
4008
0
        self.reflect_toggle_full(b, true);
4009
0
    }
4010
4011
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
4012
    #[cfg(feature = "raw")]
4013
0
    unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
4014
0
        if b.as_ptr() > self.iter.data.as_ptr() {
4015
            // The iterator has already passed the bucket's group.
4016
            // So the toggle isn't relevant to this iterator.
4017
0
            return;
4018
0
        }
4019
0
4020
0
        if self.iter.next_ctrl < self.iter.end
4021
0
            && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
4022
        {
4023
            // The iterator has not yet reached the bucket's group.
4024
            // We don't need to reload anything, but we do need to adjust the item count.
4025
4026
0
            if cfg!(debug_assertions) {
4027
                // Double-check that the user isn't lying to us by checking the bucket state.
4028
                // To do that, we need to find its control byte. We know that self.iter.data is
4029
                // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
4030
0
                let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
4031
0
                let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
4032
0
                // This method should be called _before_ a removal, or _after_ an insert,
4033
0
                // so in both cases the ctrl byte should indicate that the bucket is full.
4034
0
                assert!(is_full(*ctrl));
4035
0
            }
4036
4037
0
            if is_insert {
4038
0
                self.items += 1;
4039
0
            } else {
4040
0
                self.items -= 1;
4041
0
            }
4042
4043
0
            return;
4044
0
        }
4045
4046
        // The iterator is at the bucket group that the toggled bucket is in.
4047
        // We need to do two things:
4048
        //
4049
        //  - Determine if the iterator already yielded the toggled bucket.
4050
        //    If it did, we're done.
4051
        //  - Otherwise, update the iterator cached group so that it won't
4052
        //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
4053
        //    We'll also need to update the item count accordingly.
4054
0
        if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
4055
0
            let next_bucket = self.iter.data.next_n(index);
4056
0
            if b.as_ptr() > next_bucket.as_ptr() {
4057
0
                // The toggled bucket is "before" the bucket the iterator would yield next. We
4058
0
                // therefore don't need to do anything --- the iterator has already passed the
4059
0
                // bucket in question.
4060
0
                //
4061
0
                // The item count must already be correct, since a removal or insert "prior" to
4062
0
                // the iterator's position wouldn't affect the item count.
4063
0
            } else {
4064
                // The removed bucket is an upcoming bucket. We need to make sure it does _not_
4065
                // get yielded, and also that it's no longer included in the item count.
4066
                //
4067
                // NOTE: We can't just reload the group here, both since that might reflect
4068
                // inserts we've already passed, and because that might inadvertently unset the
4069
                // bits for _other_ removals. If we do that, we'd have to also decrement the
4070
                // item count for those other bits that we unset. But the presumably subsequent
4071
                // call to reflect for those buckets might _also_ decrement the item count.
4072
                // Instead, we _just_ flip the bit for the particular bucket the caller asked
4073
                // us to reflect.
4074
0
                let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
4075
0
                let was_full = self.iter.current_group.flip(our_bit);
4076
0
                debug_assert_ne!(was_full, is_insert);
4077
4078
0
                if is_insert {
4079
0
                    self.items += 1;
4080
0
                } else {
4081
0
                    self.items -= 1;
4082
0
                }
4083
4084
0
                if cfg!(debug_assertions) {
4085
0
                    if b.as_ptr() == next_bucket.as_ptr() {
4086
                        // The removed bucket should no longer be next
4087
0
                        debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
4088
                    } else {
4089
                        // We should not have changed what bucket comes next.
4090
0
                        debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
4091
                    }
4092
0
                }
4093
            }
4094
0
        } else {
4095
0
            // We must have already iterated past the removed item.
4096
0
        }
4097
0
    }
4098
4099
0
    unsafe fn drop_elements(&mut self) {
4100
0
        if T::NEEDS_DROP && self.items != 0 {
4101
0
            for item in self {
4102
0
                item.drop();
4103
0
            }
4104
0
        }
4105
0
    }
4106
}
4107
4108
impl<T> Clone for RawIter<T> {
4109
    #[cfg_attr(feature = "inline-more", inline)]
4110
0
    fn clone(&self) -> Self {
4111
0
        Self {
4112
0
            iter: self.iter.clone(),
4113
0
            items: self.items,
4114
0
        }
4115
0
    }
4116
}
4117
4118
impl<T> Iterator for RawIter<T> {
4119
    type Item = Bucket<T>;
4120
4121
    #[cfg_attr(feature = "inline-more", inline)]
4122
0
    fn next(&mut self) -> Option<Bucket<T>> {
4123
0
        // Inner iterator iterates over buckets
4124
0
        // so it can do unnecessary work if we already yielded all items.
4125
0
        if self.items == 0 {
4126
0
            return None;
4127
0
        }
4128
0
4129
0
        let nxt = unsafe {
4130
0
            // SAFETY: We check number of items to yield using `items` field.
4131
0
            self.iter.next_impl::<false>()
4132
0
        };
4133
0
4134
0
        debug_assert!(nxt.is_some());
4135
0
        self.items -= 1;
4136
0
4137
0
        nxt
4138
0
    }
Unexecuted instantiation: <hashbrown::raw::inner::RawIter<_> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::inner::RawIter<(gix_hash::object_id::ObjectId, alloc::borrow::Cow<bstr::bstr::BStr>)> as core::iter::traits::iterator::Iterator>::next
Unexecuted instantiation: <hashbrown::raw::inner::RawIter<(gix_hash::object_id::ObjectId, u32)> as core::iter::traits::iterator::Iterator>::next
4139
4140
    #[inline]
4141
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4142
0
        (self.items, Some(self.items))
4143
0
    }
4144
4145
    #[inline]
4146
0
    fn fold<B, F>(self, init: B, f: F) -> B
4147
0
    where
4148
0
        Self: Sized,
4149
0
        F: FnMut(B, Self::Item) -> B,
4150
0
    {
4151
0
        unsafe { self.iter.fold_impl(self.items, init, f) }
4152
0
    }
4153
}
4154
4155
impl<T> ExactSizeIterator for RawIter<T> {}
4156
impl<T> FusedIterator for RawIter<T> {}
4157
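// Editor's sketch (not in the original source; assumes the crate's `raw`
// feature, and the helper name is hypothetical): `RawIter` comes from the
// unsafe `RawTable::iter`, and the caller must keep the table alive and
// unmoved for as long as the iterator exists.
fn count_items_sketch(table: &RawTable<u64>) -> usize {
    // SAFETY: `table` outlives the iterator created within this expression.
    unsafe { table.iter().count() }
}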
4158
/// Iterator which returns the index of every full bucket in the table.
4159
///
4160
/// For maximum flexibility this iterator is not bound by a lifetime, but you
4161
/// must observe several rules when using it:
4162
/// - You must not free the hash table while iterating (including via growing/shrinking).
4163
/// - It is fine to erase a bucket that has been yielded by the iterator.
4164
/// - Erasing a bucket that has not yet been yielded by the iterator may still
4165
///   result in the iterator yielding the index of that bucket.
4166
/// - It is unspecified whether an element inserted after the iterator was
4167
///   created will be yielded by that iterator.
4168
/// - The order in which the iterator yields indices of the buckets is unspecified
4169
///   and may change in the future.
4170
pub(crate) struct FullBucketsIndices {
4171
    // Mask of full buckets in the current group. Bits are cleared from this
4172
    // mask as each element is processed.
4173
    current_group: BitMaskIter,
4174
4175
    // Initial value of the bytes' indices of the current group (relative
4176
    // to the start of the control bytes).
4177
    group_first_index: usize,
4178
4179
    // Pointer to the current group of control bytes,
4180
    // Must be aligned to the group size (Group::WIDTH).
4181
    ctrl: NonNull<u8>,
4182
4183
    // Number of elements in the table.
4184
    items: usize,
4185
}
4186
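// Editorial model (not in the original source; names are hypothetical): a
// self-contained sketch of the scheme above, with a `u16` standing in for a
// 16-wide group mask. Bits are drained lowest-first and offset by the
// group's first bucket index, mirroring `current_group` and
// `group_first_index`.
fn for_each_full_index_model(group_masks: &[u16], mut f: impl FnMut(usize)) {
    for (group, &mask) in group_masks.iter().enumerate() {
        let group_first_index = group * 16; // Group::WIDTH == 16 under SSE2
        let mut bits = mask;
        while bits != 0 {
            // Yield the absolute bucket index for the lowest set bit.
            f(group_first_index + bits.trailing_zeros() as usize);
            bits &= bits - 1; // clear the bit just yielded
        }
    }
}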
4187
impl FullBucketsIndices {
4188
    /// Advances the iterator and returns the next value.
4189
    ///
4190
    /// # Safety
4191
    ///
4192
    /// If any of the following conditions are violated, the result is
4193
    /// [`Undefined Behavior`]:
4194
    ///
4195
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
4196
    ///   i.e. the table outlives the `FullBucketsIndices`;
4197
    ///
4198
    /// * The iterator is never advanced again after all elements have been yielded.
4199
    ///
4200
    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
4201
    #[inline(always)]
4202
0
    unsafe fn next_impl(&mut self) -> Option<usize> {
4203
        loop {
4204
0
            if let Some(index) = self.current_group.next() {
4205
                // The returned `self.group_first_index + index` will always
4206
                // be in the range `0..self.buckets()`. See explanation below.
4207
0
                return Some(self.group_first_index + index);
4208
0
            }
4209
0
4210
0
            // SAFETY: The caller of this function ensures that:
4211
0
            //
4212
0
            // 1. It never tries to iterate after getting all the elements;
4213
0
            // 2. The table is alive and has not been moved;
4214
0
            // 3. `self.ctrl` initially pointed to the start of the array of control bytes.
4215
0
            //
4216
0
            // Taking the above into account, we always stay within the bounds, because:
4217
0
            //
4218
0
            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
4219
0
            //    we will never end up in this branch, since we will have already
4220
0
            //    yielded all the elements of the table.
4221
0
            //
4222
0
            //    2. For tables larger than the group width: the number of buckets is a
4223
0
            //    power of two (2 ^ n), and Group::WIDTH is also a power of two (2 ^ k). Since
4224
0
            //    `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start from
4225
0
            //    the start of the array of control bytes, and never try to iterate after
4226
0
            //    getting all the elements, the last `self.ctrl` will be equal to
4227
0
            //    `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
4228
0
            //    will always contain indices within the range `0..Group::WIDTH`,
4229
0
            //    and subsequent `self.group_first_index + index` will always return a
4230
0
            //    number less than `self.buckets()`.
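            //
            //    Worked example (editorial illustration): with 32 buckets
            //    (2 ^ 5) and Group::WIDTH == 16 (2 ^ 4), 32 % 16 == 0, so the
            //    aligned loads happen at offsets 0 and 16 only; the last load
            //    starts at self.buckets() - Group::WIDTH == 16 and never
            //    reads past the end of the control bytes.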
4231
0
            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
4232
0
4233
0
            // SAFETY: See explanation above.
4234
0
            self.current_group = Group::load_aligned(self.ctrl.as_ptr())
4235
0
                .match_full()
4236
0
                .into_iter();
4237
0
            self.group_first_index += Group::WIDTH;
4238
        }
4239
0
    }
4240
}
4241
4242
impl Iterator for FullBucketsIndices {
4243
    type Item = usize;
4244
4245
    /// Advances the iterator and returns the next value. It is up to
4246
    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
4247
    /// because we cannot make the `next` method unsafe.
4248
    #[inline(always)]
4249
0
    fn next(&mut self) -> Option<usize> {
4250
0
        // Return if we already yielded all items.
4251
0
        if self.items == 0 {
4252
0
            return None;
4253
0
        }
4254
0
4255
0
        let nxt = unsafe {
4256
0
            // SAFETY:
4257
0
            // 1. We check the number of items to yield using the `items` field.
4258
0
            // 2. The caller ensures that the table is alive and has not moved.
4259
0
            self.next_impl()
4260
0
        };
4261
0
4262
0
        debug_assert!(nxt.is_some());
4263
0
        self.items -= 1;
4264
0
4265
0
        nxt
4266
0
    }
4267
4268
    #[inline(always)]
4269
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4270
0
        (self.items, Some(self.items))
4271
0
    }
4272
}
4273
4274
impl ExactSizeIterator for FullBucketsIndices {}
4275
impl FusedIterator for FullBucketsIndices {}
4276
4277
/// Iterator which consumes a table and returns elements.
4278
pub struct RawIntoIter<T, A: Allocator = Global> {
4279
    iter: RawIter<T>,
4280
    allocation: Option<(NonNull<u8>, Layout, A)>,
4281
    marker: PhantomData<T>,
4282
}
4283
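// Editor's sketch (not in the original source; assumes the crate's `raw`
// feature, and the helper name is hypothetical): `RawIntoIter` is returned
// by `RawTable::into_iter`; it yields elements by value, and its `Drop` both
// drops any remaining elements and frees the table's allocation.
fn into_iter_sketch() {
    let mut table = RawTable::new();
    let hasher = |v: &u64| *v; // identity hash, as in the tests below
    for i in 0..4u64 {
        table.insert(i, i, hasher);
    }
    let sum: u64 = table.into_iter().sum();
    assert_eq!(sum, 6); // 0 + 1 + 2 + 3
}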
4284
impl<T, A: Allocator> RawIntoIter<T, A> {
4285
    #[cfg_attr(feature = "inline-more", inline)]
4286
0
    pub fn iter(&self) -> RawIter<T> {
4287
0
        self.iter.clone()
4288
0
    }
4289
}
4290
4291
unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
4292
where
4293
    T: Send,
4294
    A: Send,
4295
{
4296
}
4297
unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
4298
where
4299
    T: Sync,
4300
    A: Sync,
4301
{
4302
}
4303
4304
#[cfg(feature = "nightly")]
4305
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
4306
    #[cfg_attr(feature = "inline-more", inline)]
4307
    fn drop(&mut self) {
4308
        unsafe {
4309
            // Drop all remaining elements
4310
            self.iter.drop_elements();
4311
4312
            // Free the table
4313
            if let Some((ptr, layout, ref alloc)) = self.allocation {
4314
                alloc.deallocate(ptr, layout);
4315
            }
4316
        }
4317
    }
4318
}
4319
#[cfg(not(feature = "nightly"))]
4320
impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
4321
    #[cfg_attr(feature = "inline-more", inline)]
4322
0
    fn drop(&mut self) {
4323
0
        unsafe {
4324
0
            // Drop all remaining elements
4325
0
            self.iter.drop_elements();
4326
4327
            // Free the table
4328
0
            if let Some((ptr, layout, ref alloc)) = self.allocation {
4329
0
                alloc.deallocate(ptr, layout);
4330
0
            }
4331
        }
4332
0
    }
4333
}
4334
4335
impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
4336
    type Item = T;
4337
4338
    #[cfg_attr(feature = "inline-more", inline)]
4339
0
    fn next(&mut self) -> Option<T> {
4340
0
        unsafe { Some(self.iter.next()?.read()) }
4341
0
    }
4342
4343
    #[inline]
4344
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4345
0
        self.iter.size_hint()
4346
0
    }
4347
}
4348
4349
impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
4350
impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
4351
4352
/// Iterator which consumes elements without freeing the table storage.
4353
pub struct RawDrain<'a, T, A: Allocator = Global> {
4354
    iter: RawIter<T>,
4355
4356
    // The table is moved into the iterator for the duration of the drain. This
4357
    // ensures that an empty table is left if the drain iterator is leaked
4358
    // without dropping.
4359
    table: RawTableInner,
4360
    orig_table: NonNull<RawTableInner>,
4361
4362
    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
4363
    // covariant over T.
4364
    marker: PhantomData<&'a RawTable<T, A>>,
4365
}
4366
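// Editor's sketch (not in the original source; assumes the crate's `raw`
// feature, and the helper name is hypothetical): because the inner table is
// moved into the drain up front, leaking the drain with `mem::forget` still
// leaves the source table empty; the elements and storage are then leaked
// rather than dropped.
fn drain_leak_sketch() {
    let mut table = RawTable::new();
    let hasher = |v: &u64| *v; // identity hash, as in the tests below
    for i in 0..8u64 {
        table.insert(i, i, hasher);
    }
    core::mem::forget(table.drain()); // leak the drain without running Drop
    assert_eq!(table.len(), 0); // the table was emptied when the drain began
}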
4367
impl<T, A: Allocator> RawDrain<'_, T, A> {
4368
    #[cfg_attr(feature = "inline-more", inline)]
4369
0
    pub fn iter(&self) -> RawIter<T> {
4370
0
        self.iter.clone()
4371
0
    }
4372
}
4373
4374
unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
4375
where
4376
    T: Send,
4377
    A: Send,
4378
{
4379
}
4380
unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
4381
where
4382
    T: Sync,
4383
    A: Sync,
4384
{
4385
}
4386
4387
impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
4388
    #[cfg_attr(feature = "inline-more", inline)]
4389
0
    fn drop(&mut self) {
4390
0
        unsafe {
4391
0
            // Drop all remaining elements. Note that this may panic.
4392
0
            self.iter.drop_elements();
4393
0
4394
0
            // Reset the contents of the table now that all elements have been
4395
0
            // dropped.
4396
0
            self.table.clear_no_drop();
4397
0
4398
0
            // Move the now empty table back to its original location.
4399
0
            self.orig_table
4400
0
                .as_ptr()
4401
0
                .copy_from_nonoverlapping(&self.table, 1);
4402
0
        }
4403
0
    }
4404
}
4405
4406
impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
4407
    type Item = T;
4408
4409
    #[cfg_attr(feature = "inline-more", inline)]
4410
0
    fn next(&mut self) -> Option<T> {
4411
        unsafe {
4412
0
            let item = self.iter.next()?;
4413
0
            Some(item.read())
4414
        }
4415
0
    }
4416
4417
    #[inline]
4418
0
    fn size_hint(&self) -> (usize, Option<usize>) {
4419
0
        self.iter.size_hint()
4420
0
    }
4421
}
4422
4423
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4424
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
4425
4426
/// Iterator over occupied buckets that could match a given hash.
4427
///
4428
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4429
/// items that have a hash value different from the one provided. You should
4430
/// always validate the returned values before using them.
4431
///
4432
/// For maximum flexibility this iterator is not bound by a lifetime, but you
4433
/// must observe several rules when using it:
4434
/// - You must not free the hash table while iterating (including via growing/shrinking).
4435
/// - It is fine to erase a bucket that has been yielded by the iterator.
4436
/// - Erasing a bucket that has not yet been yielded by the iterator may still
4437
///   result in the iterator yielding that bucket.
4438
/// - It is unspecified whether an element inserted after the iterator was
4439
///   created will be yielded by that iterator.
4440
/// - The order in which the iterator yields buckets is unspecified and may
4441
///   change in the future.
4442
pub struct RawIterHash<T> {
4443
    inner: RawIterHashInner,
4444
    _marker: PhantomData<T>,
4445
}
4446
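// Editor's sketch (not in the original source; assumes the crate's `raw`
// feature, and the helper name is hypothetical): since only 7 bits of the
// hash live in the control bytes, each bucket yielded by
// `RawTable::iter_hash` must be validated against the full key before use.
unsafe fn find_exact_sketch(table: &RawTable<u64>, hash: u64, key: u64) -> Option<u64> {
    // SAFETY (caller): `table` is alive and unmoved while we iterate.
    for bucket in table.iter_hash(hash) {
        let candidate = *bucket.as_ref();
        if candidate == key {
            return Some(candidate); // full equality check, not just the h2 byte
        }
    }
    None
}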
4447
struct RawIterHashInner {
4448
    // See `RawTableInner`'s corresponding fields for details.
4449
    // We can't store a `*const RawTableInner` as it would get
4450
    // invalidated by the user calling `&mut` methods on `RawTable`.
4451
    bucket_mask: usize,
4452
    ctrl: NonNull<u8>,
4453
4454
    // The top 7 bits of the hash.
4455
    h2_hash: u8,
4456
4457
    // The sequence of groups to probe in the search.
4458
    probe_seq: ProbeSeq,
4459
4460
    group: Group,
4461
4462
    // The elements within the group with a matching h2-hash.
4463
    bitmask: BitMaskIter,
4464
}
4465
4466
impl<T> RawIterHash<T> {
4467
    #[cfg_attr(feature = "inline-more", inline)]
4468
    #[cfg(feature = "raw")]
4469
0
    unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4470
0
        RawIterHash {
4471
0
            inner: RawIterHashInner::new(&table.table, hash),
4472
0
            _marker: PhantomData,
4473
0
        }
4474
0
    }
4475
}
4476
impl RawIterHashInner {
4477
    #[cfg_attr(feature = "inline-more", inline)]
4478
    #[cfg(feature = "raw")]
4479
0
    unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4480
0
        let h2_hash = h2(hash);
4481
0
        let probe_seq = table.probe_seq(hash);
4482
0
        let group = Group::load(table.ctrl(probe_seq.pos));
4483
0
        let bitmask = group.match_byte(h2_hash).into_iter();
4484
0
4485
0
        RawIterHashInner {
4486
0
            bucket_mask: table.bucket_mask,
4487
0
            ctrl: table.ctrl,
4488
0
            h2_hash,
4489
0
            probe_seq,
4490
0
            group,
4491
0
            bitmask,
4492
0
        }
4493
0
    }
4494
}
4495
4496
impl<T> Iterator for RawIterHash<T> {
4497
    type Item = Bucket<T>;
4498
4499
0
    fn next(&mut self) -> Option<Bucket<T>> {
4500
0
        unsafe {
4501
0
            match self.inner.next() {
4502
0
                Some(index) => {
4503
0
                    // Can't use `RawTable::bucket` here as we don't have
4504
0
                    // an actual `RawTable` reference to use.
4505
0
                    debug_assert!(index <= self.inner.bucket_mask);
4506
0
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4507
0
                    Some(bucket)
4508
                }
4509
0
                None => None,
4510
            }
4511
        }
4512
0
    }
4513
}
4514
4515
impl Iterator for RawIterHashInner {
4516
    type Item = usize;
4517
4518
0
    fn next(&mut self) -> Option<Self::Item> {
4519
        unsafe {
4520
            loop {
4521
0
                if let Some(bit) = self.bitmask.next() {
4522
0
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4523
0
                    return Some(index);
4524
0
                }
4525
0
                if likely(self.group.match_empty().any_bit_set()) {
4526
0
                    return None;
4527
0
                }
4528
0
                self.probe_seq.move_next(self.bucket_mask);
4529
0
4530
0
                // Can't use `RawTableInner::ctrl` here as we don't have
4531
0
                // an actual `RawTableInner` reference to use.
4532
0
                let index = self.probe_seq.pos;
4533
0
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4534
0
                let group_ctrl = self.ctrl.as_ptr().add(index);
4535
0
4536
0
                self.group = Group::load(group_ctrl);
4537
0
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
4538
            }
4539
        }
4540
0
    }
4541
}
4542
4543
pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4544
    pub iter: RawIter<T>,
4545
    pub table: &'a mut RawTable<T, A>,
4546
}
4547
4548
impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4549
    #[cfg_attr(feature = "inline-more", inline)]
4550
0
    pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4551
0
    where
4552
0
        F: FnMut(&mut T) -> bool,
4553
0
    {
4554
        unsafe {
4555
0
            for item in &mut self.iter {
4556
0
                if f(item.as_mut()) {
4557
0
                    return Some(self.table.remove(item).0);
4558
0
                }
4559
            }
4560
        }
4561
0
        None
4562
0
    }
4563
}
4564
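// Editor's sketch (not in the original source; crate-internal, and the
// helper name is hypothetical): the pattern behind `extract_if`-style
// draining. The predicate inspects each element through `&mut T`; matching
// elements are removed and yielded, the rest stay in the table.
fn extract_even_sketch(table: &mut RawTable<u64>) -> ::alloc::vec::Vec<u64> {
    let mut extracted = ::alloc::vec::Vec::new();
    // SAFETY: the iterator does not outlive `table`, and removal via
    // `RawExtractIf::next` never reallocates the table.
    let iter = unsafe { table.iter() };
    let mut it = RawExtractIf { iter, table };
    while let Some(v) = it.next(|v| *v % 2 == 0) {
        extracted.push(v);
    }
    extracted
}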
4565
#[cfg(test)]
4566
mod test_map {
4567
    use super::*;
4568
4569
    fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4570
        unsafe {
4571
            table.table.rehash_in_place(
4572
                &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4573
                mem::size_of::<T>(),
4574
                if mem::needs_drop::<T>() {
4575
                    Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
4576
                } else {
4577
                    None
4578
                },
4579
            );
4580
        }
4581
    }
4582
4583
    #[test]
4584
    fn rehash() {
4585
        let mut table = RawTable::new();
4586
        let hasher = |i: &u64| *i;
4587
        for i in 0..100 {
4588
            table.insert(i, i, hasher);
4589
        }
4590
4591
        for i in 0..100 {
4592
            unsafe {
4593
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4594
            }
4595
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4596
        }
4597
4598
        rehash_in_place(&mut table, hasher);
4599
4600
        for i in 0..100 {
4601
            unsafe {
4602
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4603
            }
4604
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4605
        }
4606
    }
4607
4608
    /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4609
    /// AN UNINITIALIZED TABLE DURING THE DROP
4610
    #[test]
4611
    fn test_drop_uninitialized() {
4612
        use ::alloc::vec::Vec;
4613
4614
        let table = unsafe {
4615
            // SAFETY: `buckets` is a power of two and we're not
4616
            // trying to actually use the returned RawTable.
4617
            RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4618
                .unwrap()
4619
        };
4620
        drop(table);
4621
    }
4622
4623
    /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4624
    /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4625
    #[test]
4626
    fn test_drop_zero_items() {
4627
        use ::alloc::vec::Vec;
4628
        unsafe {
4629
            // SAFETY: `buckets` is a power of two and we're not
4630
            // trying to actually use the returned RawTable.
4631
            let table =
4632
                RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4633
                    .unwrap();
4634
4635
            // WE SIMULATE, AS IT WERE, A FULL TABLE.
4636
4637
            // SAFETY: We checked that the table is allocated and therefore the table already has
4638
            // `self.bucket_mask + 1 + Group::WIDTH` control bytes (see TableLayout::calculate_layout_for),
4639
            // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4640
            table
4641
                .table
4642
                .ctrl(0)
4643
                .write_bytes(EMPTY, table.table.num_ctrl_bytes());
4644
4645
            // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4646
            table.table.ctrl(0).write_bytes(0, table.capacity());
4647
4648
            // Fix up the trailing control bytes. See the comments in set_ctrl
4649
            // for the handling of tables smaller than the group width.
4650
            if table.buckets() < Group::WIDTH {
4651
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` control bytes,
4652
                // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4653
                // `Group::WIDTH` is safe
4654
                table
4655
                    .table
4656
                    .ctrl(0)
4657
                    .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4658
            } else {
4659
                // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH`
4660
                // control bytes, so copying `Group::WIDTH` bytes with offset equal
4661
                // to `self.buckets() == self.bucket_mask + 1` is safe
4662
                table
4663
                    .table
4664
                    .ctrl(0)
4665
                    .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4666
            }
4667
            drop(table);
4668
        }
4669
    }
4670
4671
    /// CHECKING THAT THE TABLE IS LEFT EMPTY AND PROPERLY CLEANED UP
4672
    /// IF A CLONE PANICS PART-WAY THROUGH `clone_from`.
4673
    #[test]
4674
    fn test_catch_panic_clone_from() {
4675
        use ::alloc::sync::Arc;
4676
        use ::alloc::vec::Vec;
4677
        use allocator_api2::alloc::{AllocError, Allocator, Global};
4678
        use core::sync::atomic::{AtomicI8, Ordering};
4679
        use std::thread;
4680
4681
        struct MyAllocInner {
4682
            drop_count: Arc<AtomicI8>,
4683
        }
4684
4685
        #[derive(Clone)]
4686
        struct MyAlloc {
4687
            _inner: Arc<MyAllocInner>,
4688
        }
4689
4690
        impl Drop for MyAllocInner {
4691
            fn drop(&mut self) {
4692
                println!("MyAlloc freed.");
4693
                self.drop_count.fetch_sub(1, Ordering::SeqCst);
4694
            }
4695
        }
4696
4697
        unsafe impl Allocator for MyAlloc {
4698
            fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4699
                let g = Global;
4700
                g.allocate(layout)
4701
            }
4702
4703
            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4704
                let g = Global;
4705
                g.deallocate(ptr, layout)
4706
            }
4707
        }
4708
4709
        const DISARMED: bool = false;
4710
        const ARMED: bool = true;
4711
4712
        struct CheckedCloneDrop {
4713
            panic_in_clone: bool,
4714
            dropped: bool,
4715
            need_drop: Vec<u64>,
4716
        }
4717
4718
        impl Clone for CheckedCloneDrop {
4719
            fn clone(&self) -> Self {
4720
                if self.panic_in_clone {
4721
                    panic!("panic in clone")
4722
                }
4723
                Self {
4724
                    panic_in_clone: self.panic_in_clone,
4725
                    dropped: self.dropped,
4726
                    need_drop: self.need_drop.clone(),
4727
                }
4728
            }
4729
        }
4730
4731
        impl Drop for CheckedCloneDrop {
4732
            fn drop(&mut self) {
4733
                if self.dropped {
4734
                    panic!("double drop");
4735
                }
4736
                self.dropped = true;
4737
            }
4738
        }
4739
4740
        let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4741
4742
        let mut table = RawTable::new_in(MyAlloc {
4743
            _inner: Arc::new(MyAllocInner {
4744
                drop_count: dropped.clone(),
4745
            }),
4746
        });
4747
4748
        for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4749
            let idx = idx as u64;
4750
            table.insert(
4751
                idx,
4752
                (
4753
                    idx,
4754
                    CheckedCloneDrop {
4755
                        panic_in_clone,
4756
                        dropped: false,
4757
                        need_drop: vec![idx],
4758
                    },
4759
                ),
4760
                |(k, _)| *k,
4761
            );
4762
        }
4763
4764
        assert_eq!(table.len(), 7);
4765
4766
        thread::scope(|s| {
4767
            let result = s.spawn(|| {
4768
                let armed_flags = [
4769
                    DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4770
                ];
4771
                let mut scope_table = RawTable::new_in(MyAlloc {
4772
                    _inner: Arc::new(MyAllocInner {
4773
                        drop_count: dropped.clone(),
4774
                    }),
4775
                });
4776
                for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4777
                    let idx = idx as u64;
4778
                    scope_table.insert(
4779
                        idx,
4780
                        (
4781
                            idx,
4782
                            CheckedCloneDrop {
4783
                                panic_in_clone,
4784
                                dropped: false,
4785
                                need_drop: vec![idx + 100],
4786
                            },
4787
                        ),
4788
                        |(k, _)| *k,
4789
                    );
4790
                }
4791
                table.clone_from(&scope_table);
4792
            });
4793
            assert!(result.join().is_err());
4794
        });
4795
4796
        // Let's check that all iterators work fine and do not return elements
4797
        // (especially `RawIterRange`, which does not depend on the number of
4798
        // elements in the table, but looks directly at the control bytes)
4799
        //
4800
        // SAFETY: We know for sure that `RawTable` will outlive
4801
        // the returned `RawIter / RawIterRange` iterator.
4802
        assert_eq!(table.len(), 0);
4803
        assert_eq!(unsafe { table.iter().count() }, 0);
4804
        assert_eq!(unsafe { table.iter().iter.count() }, 0);
4805
4806
        for idx in 0..table.buckets() {
4807
            let idx = idx as u64;
4808
            assert!(
4809
                table.find(idx, |(k, _)| *k == idx).is_none(),
4810
                "Index: {idx}"
4811
            );
4812
        }
4813
4814
        // All allocator clones should already be dropped.
4815
        assert_eq!(dropped.load(Ordering::SeqCst), 1);
4816
    }
4817
}