/rust/registry/src/index.crates.io-1949cf8c6b5b557f/fixedbitset-0.5.7/src/lib.rs
Line | Count | Source |
1 | | //! `FixedBitSet` is a simple fixed size set of bits. |
2 | | //! |
3 | | //! ### Crate features |
4 | | //! |
5 | | //! - `std` (default feature) |
6 | | //! Disabling this feature disables using std and instead uses crate alloc. |
7 | | //! |
8 | | //! ### SIMD Acceleration |
9 | | //! `fixedbitset` is written with SIMD in mind. The backing store and set operations will use aligned SIMD data types and instructions when compiling |
10 | | //! for compatible target platforms. The use of SIMD generally enables better performance in many set and batch operations (i.e. intersection/union/inserting a range). |
11 | | //! |
12 | | //! When SIMD is not available on the target, the crate will gracefully fallback to a default implementation. It is intended to add support for other SIMD architectures |
13 | | //! once they appear in stable Rust. |
14 | | //! |
15 | | //! Currently only SSE2/AVX/AVX2 on x86/x86_64 and wasm32 SIMD are supported as this is what stable Rust supports. |
16 | | #![no_std] |
17 | | #![deny(clippy::undocumented_unsafe_blocks)] |
18 | | |
19 | | extern crate alloc; |
20 | | use alloc::{vec, vec::Vec}; |
21 | | |
22 | | mod block; |
23 | | mod range; |
24 | | |
25 | | #[cfg(feature = "serde")] |
26 | | extern crate serde; |
27 | | #[cfg(feature = "serde")] |
28 | | mod serde_impl; |
29 | | |
30 | | use core::fmt::Write; |
31 | | use core::fmt::{Binary, Display, Error, Formatter}; |
32 | | |
33 | | use core::cmp::Ordering; |
34 | | use core::hash::Hash; |
35 | | use core::iter::{Chain, FusedIterator}; |
36 | | use core::mem::ManuallyDrop; |
37 | | use core::mem::MaybeUninit; |
38 | | use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Index}; |
39 | | use core::ptr::NonNull; |
40 | | pub use range::IndexRange; |
41 | | |
// Number of bits in one `usize` word of the backing store.
pub(crate) const BITS: usize = core::mem::size_of::<Block>() * 8;
// Byte width of one `usize` word; only compiled when the `serde` feature is enabled.
#[cfg(feature = "serde")]
pub(crate) const BYTES: usize = core::mem::size_of::<Block>();

// The SIMD-friendly storage block; distinct from the public `usize`-sized `Block`.
use block::Block as SimdBlock;
/// The `usize`-sized block type exposed by the public slice API (`as_slice`).
pub type Block = usize;
48 | | |
/// Computes quotient and remainder of `x / denominator` in one call.
#[inline]
fn div_rem(x: usize, denominator: usize) -> (usize, usize) {
    let quotient = x / denominator;
    let remainder = x % denominator;
    (quotient, remainder)
}
53 | | |
/// Decomposes a `Vec` into its raw (pointer, capacity, length) triple
/// without dropping the buffer; the caller takes ownership of the allocation.
fn vec_into_parts<T>(vec: Vec<T>) -> (NonNull<T>, usize, usize) {
    let mut guard = ManuallyDrop::new(vec);
    let capacity = guard.capacity();
    let len = guard.len();
    // SAFETY: A Vec's internal pointer is always non-null.
    let ptr = unsafe { NonNull::new_unchecked(guard.as_mut_ptr()) };
    (ptr, capacity, len)
}
63 | | |
/// `FixedBitSet` is a simple fixed size set of bits that each can
/// be enabled (1 / **true**) or disabled (0 / **false**).
///
/// The bit set has a fixed capacity in terms of enabling bits (and the
/// capacity can grow using the `grow` method).
///
/// Derived traits depend on both the zeros and ones, so [0,1] is not equal to
/// [0,1,0].
#[derive(Debug, Eq)]
pub struct FixedBitSet {
    // Raw pointer to the heap buffer of SIMD-sized blocks; dangling when no
    // allocation has been made (see `new`). Managed manually, not by a Vec.
    pub(crate) data: NonNull<MaybeUninit<SimdBlock>>,
    // Allocated capacity in `SimdBlock`s, as originally reported by the Vec
    // the buffer was taken from (needed to reconstruct it for realloc/drop).
    capacity: usize,
    /// length in bits
    pub(crate) length: usize,
}
79 | | |
// The raw `NonNull` field suppresses the auto traits, so Send/Sync are
// reinstated manually here.
// SAFETY: FixedBitset contains no thread-local state and can be safely sent between threads
unsafe impl Send for FixedBitSet {}
// SAFETY: FixedBitset does not provide simultaneous unsynchronized mutable access to the
// underlying buffer.
unsafe impl Sync for FixedBitSet {}
85 | | |
86 | | impl FixedBitSet { |
87 | | /// Create a new empty **FixedBitSet**. |
88 | 0 | pub const fn new() -> Self { |
89 | 0 | FixedBitSet { |
90 | 0 | data: NonNull::dangling(), |
91 | 0 | capacity: 0, |
92 | 0 | length: 0, |
93 | 0 | } |
94 | 0 | } |
95 | | |
96 | | /// Create a new **FixedBitSet** with a specific number of bits, |
97 | | /// all initially clear. |
98 | 0 | pub fn with_capacity(bits: usize) -> Self { |
99 | 0 | let (mut blocks, rem) = div_rem(bits, SimdBlock::BITS); |
100 | 0 | blocks += (rem > 0) as usize; |
101 | 0 | Self::from_blocks_and_len(vec![SimdBlock::NONE; blocks], bits) |
102 | 0 | } |
103 | | |
104 | | #[inline] |
105 | 0 | fn from_blocks_and_len(data: Vec<SimdBlock>, length: usize) -> Self { |
106 | 0 | let (data, capacity, _) = vec_into_parts(data); |
107 | 0 | FixedBitSet { |
108 | 0 | data: data.cast(), |
109 | 0 | capacity, |
110 | 0 | length, |
111 | 0 | } |
112 | 0 | } |
113 | | |
114 | | /// Create a new **FixedBitSet** with a specific number of bits, |
115 | | /// initialized from provided blocks. |
116 | | /// |
117 | | /// If the blocks are not the exact size needed for the capacity |
118 | | /// they will be padded with zeros (if shorter) or truncated to |
119 | | /// the capacity (if longer). |
120 | | /// |
121 | | /// For example: |
122 | | /// ``` |
123 | | /// let data = vec![4]; |
124 | | /// let bs = fixedbitset::FixedBitSet::with_capacity_and_blocks(4, data); |
125 | | /// assert_eq!(format!("{:b}", bs), "0010"); |
126 | | /// ``` |
127 | 0 | pub fn with_capacity_and_blocks<I: IntoIterator<Item = Block>>(bits: usize, blocks: I) -> Self { |
128 | 0 | let mut bitset = Self::with_capacity(bits); |
129 | 0 | for (subblock, value) in bitset.as_mut_slice().iter_mut().zip(blocks.into_iter()) { |
130 | 0 | *subblock = value; |
131 | 0 | } |
132 | 0 | bitset |
133 | 0 | } |
134 | | |
    /// Grow capacity to **bits**, all new bits initialized to zero
    #[inline]
    pub fn grow(&mut self, bits: usize) {
        // Out-of-line cold path: keeps the common no-op case (bits <= length)
        // small and inlinable while the reallocating path stays out of the way.
        #[cold]
        #[track_caller]
        #[inline(never)]
        fn do_grow(slf: &mut FixedBitSet, bits: usize) {
            // SAFETY: The provided fill is initialized to NONE.
            unsafe { slf.grow_inner(bits, MaybeUninit::new(SimdBlock::NONE)) };
        }

        if bits > self.length {
            do_grow(self, bits);
        }
    }
150 | | |
    /// Resizes the backing buffer to hold `bits` bits, filling new blocks with `fill`.
    ///
    /// # Safety
    /// If `fill` is uninitialized, the memory must not be accessed and must be immediately
    /// written over
    #[inline(always)]
    unsafe fn grow_inner(&mut self, bits: usize, fill: MaybeUninit<SimdBlock>) {
        // Temporarily re-materialize the buffer as a Vec so Vec::resize can
        // handle reallocation and copying.
        // SAFETY: The data pointer and capacity were created from a Vec initially. The block
        // len is identical to that of the original.
        let mut data = unsafe {
            Vec::from_raw_parts(self.data.as_ptr(), self.simd_block_len(), self.capacity)
        };
        // Round the requested bit count up to whole SIMD blocks.
        let (mut blocks, rem) = div_rem(bits, SimdBlock::BITS);
        blocks += (rem > 0) as usize;
        data.resize(blocks, fill);
        // Take the (possibly re-allocated) buffer back out of the Vec without dropping it.
        let (data, capacity, _) = vec_into_parts(data);
        self.data = data;
        self.capacity = capacity;
        self.length = bits;
    }
169 | | |
    /// Returns a shared reference to the `subblock`-th `usize` word of the store.
    ///
    /// # Safety
    /// `subblock` must index an allocated word, i.e. be less than `self.usize_len()`.
    #[inline]
    unsafe fn get_unchecked(&self, subblock: usize) -> &Block {
        &*self.data.as_ptr().cast::<Block>().add(subblock)
    }
174 | | |
    /// Returns a mutable reference to the `subblock`-th `usize` word of the store.
    ///
    /// # Safety
    /// `subblock` must index an allocated word, i.e. be less than `self.usize_len()`.
    #[inline]
    unsafe fn get_unchecked_mut(&mut self, subblock: usize) -> &mut Block {
        &mut *self.data.as_ptr().cast::<Block>().add(subblock)
    }
179 | | |
180 | | #[inline] |
181 | 0 | fn usize_len(&self) -> usize { |
182 | 0 | let (mut blocks, rem) = div_rem(self.length, BITS); |
183 | 0 | blocks += (rem > 0) as usize; |
184 | 0 | blocks |
185 | 0 | } |
186 | | |
187 | | #[inline] |
188 | 0 | fn simd_block_len(&self) -> usize { |
189 | 0 | let (mut blocks, rem) = div_rem(self.length, SimdBlock::BITS); |
190 | 0 | blocks += (rem > 0) as usize; |
191 | 0 | blocks |
192 | 0 | } |
193 | | |
194 | | #[inline] |
195 | 0 | fn batch_count_ones(blocks: impl IntoIterator<Item = Block>) -> usize { |
196 | 0 | blocks.into_iter().map(|x| x.count_ones() as usize).sum() |
197 | 0 | } |
198 | | |
    /// Views the backing store as a slice of initialized SIMD blocks.
    #[inline]
    fn as_simd_slice(&self) -> &[SimdBlock] {
        // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
        // is called with a read-only borrow so no other write can happen as long as the returned borrow lives.
        unsafe { core::slice::from_raw_parts(self.data.as_ptr().cast(), self.simd_block_len()) }
    }
205 | | |
    /// Views the backing store as a mutable slice of initialized SIMD blocks.
    #[inline]
    fn as_mut_simd_slice(&mut self) -> &mut [SimdBlock] {
        // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
        // is called with a mutable borrow so no other read or write can happen as long as the returned borrow lives.
        unsafe { core::slice::from_raw_parts_mut(self.data.as_ptr().cast(), self.simd_block_len()) }
    }
212 | | |
    /// Views the backing store as a slice of possibly-uninitialized SIMD blocks.
    #[inline]
    fn as_simd_slice_uninit(&self) -> &[MaybeUninit<SimdBlock>] {
        // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
        // is called with a read-only borrow so no other write can happen as long as the returned borrow lives.
        unsafe { core::slice::from_raw_parts(self.data.as_ptr(), self.simd_block_len()) }
    }
219 | | |
    /// Views the backing store as a mutable slice of possibly-uninitialized SIMD blocks.
    #[inline]
    fn as_mut_simd_slice_uninit(&mut self) -> &mut [MaybeUninit<SimdBlock>] {
        // SAFETY: The slice constructed is within bounds of the underlying allocation. This function
        // is called with a mutable borrow so no other read or write can happen as long as the returned borrow lives.
        unsafe { core::slice::from_raw_parts_mut(self.data.as_ptr(), self.simd_block_len()) }
    }
226 | | |
    /// Grows the internal size of the bitset before inserting a bit
    ///
    /// Unlike `insert`, this cannot panic, but may allocate if the bit is outside of the existing buffer's range.
    ///
    /// This is faster than calling `grow` then `insert` in succession.
    #[inline]
    pub fn grow_and_insert(&mut self, bits: usize) {
        // `bits` is the index of the single bit to set, so the set must be able
        // to hold at least `bits + 1` bits afterwards.
        self.grow(bits + 1);

        let (blocks, rem) = div_rem(bits, BITS);
        // SAFETY: The above grow ensures that the block is inside the Vec's allocation.
        unsafe {
            *self.get_unchecked_mut(blocks) |= 1 << rem;
        }
    }
242 | | |
    /// The length of the [`FixedBitSet`] in bits.
    ///
    /// Note: `len` includes both set and unset bits.
    /// ```
    /// # use fixedbitset::FixedBitSet;
    /// let bitset = FixedBitSet::with_capacity(10);
    /// // there are 0 set bits, but 10 unset bits
    /// assert_eq!(bitset.len(), 10);
    /// ```
    /// `len` does not return the count of set bits. For that, use
    /// [`bitset.count_ones(..)`](FixedBitSet::count_ones) instead.
    #[inline]
    pub fn len(&self) -> usize {
        // Total bit capacity, not the number of set bits.
        self.length
    }
258 | | |
259 | | /// `true` if the [`FixedBitSet`] is empty. |
260 | | /// |
261 | | /// Note that an "empty" `FixedBitSet` is a `FixedBitSet` with |
262 | | /// no bits (meaning: it's length is zero). If you want to check |
263 | | /// if all bits are unset, use [`FixedBitSet::is_clear`]. |
264 | | /// |
265 | | /// ``` |
266 | | /// # use fixedbitset::FixedBitSet; |
267 | | /// let bitset = FixedBitSet::with_capacity(10); |
268 | | /// assert!(!bitset.is_empty()); |
269 | | /// |
270 | | /// let bitset = FixedBitSet::with_capacity(0); |
271 | | /// assert!(bitset.is_empty()); |
272 | | /// ``` |
273 | | #[inline] |
274 | 0 | pub fn is_empty(&self) -> bool { |
275 | 0 | self.len() == 0 |
276 | 0 | } |
277 | | |
278 | | /// `true` if all bits in the [`FixedBitSet`] are unset. |
279 | | /// |
280 | | /// As opposed to [`FixedBitSet::is_empty`], which is `true` only for |
281 | | /// sets without any bits, set or unset. |
282 | | /// |
283 | | /// ``` |
284 | | /// # use fixedbitset::FixedBitSet; |
285 | | /// let mut bitset = FixedBitSet::with_capacity(10); |
286 | | /// assert!(bitset.is_clear()); |
287 | | /// |
288 | | /// bitset.insert(2); |
289 | | /// assert!(!bitset.is_clear()); |
290 | | /// ``` |
291 | | /// |
292 | | /// This is equivalent to [`bitset.count_ones(..) == 0`](FixedBitSet::count_ones). |
293 | | #[inline] |
294 | 0 | pub fn is_clear(&self) -> bool { |
295 | 0 | self.as_simd_slice().iter().all(|block| block.is_empty()) |
296 | 0 | } |
297 | | |
    /// Finds the lowest set bit in the bitset.
    ///
    /// Returns `None` if there aren't any set bits.
    ///
    /// ```
    /// # use fixedbitset::FixedBitSet;
    /// let mut bitset = FixedBitSet::with_capacity(10);
    /// assert_eq!(bitset.minimum(), None);
    ///
    /// bitset.insert(2);
    /// assert_eq!(bitset.minimum(), Some(2));
    /// bitset.insert(8);
    /// assert_eq!(bitset.minimum(), Some(2));
    /// ```
    #[inline]
    pub fn minimum(&self) -> Option<usize> {
        // Locate the first SIMD block with any bit set; `?` bails out when none exists.
        let (block_idx, block) = self
            .as_simd_slice()
            .iter()
            .enumerate()
            .find(|&(_, block)| !block.is_empty())?;
        let mut inner = 0;
        let mut trailing = 0;
        // Within that block, scan its usize words low-to-high for the first
        // non-zero word, then take the lowest set bit inside it.
        for subblock in block.into_usize_array() {
            if subblock != 0 {
                trailing = subblock.trailing_zeros() as usize;
                break;
            } else {
                inner += BITS;
            }
        }
        // Bit index = whole blocks skipped + whole words skipped + offset in word.
        Some(block_idx * SimdBlock::BITS + inner + trailing)
    }
331 | | |
    /// Finds the highest set bit in the bitset.
    ///
    /// Returns `None` if there aren't any set bits.
    ///
    /// ```
    /// # use fixedbitset::FixedBitSet;
    /// let mut bitset = FixedBitSet::with_capacity(10);
    /// assert_eq!(bitset.maximum(), None);
    ///
    /// bitset.insert(8);
    /// assert_eq!(bitset.maximum(), Some(8));
    /// bitset.insert(2);
    /// assert_eq!(bitset.maximum(), Some(8));
    /// ```
    #[inline]
    pub fn maximum(&self) -> Option<usize> {
        // Scan blocks from the back; because of `.rev()`, `block_idx` counts
        // blocks from the END of the set (0 = last block).
        let (block_idx, block) = self
            .as_simd_slice()
            .iter()
            .rev()
            .enumerate()
            .find(|&(_, block)| !block.is_empty())?;
        let mut inner = 0;
        let mut leading = 0;
        // Within that block, scan words high-to-low for the first non-zero word,
        // then count leading zeros to find the highest set bit inside it.
        for subblock in block.into_usize_array().iter().rev() {
            if *subblock != 0 {
                leading = subblock.leading_zeros() as usize;
                break;
            } else {
                inner += BITS;
            }
        }
        // Convert the from-the-back offsets into a from-the-front bit index.
        let max = self.simd_block_len() * SimdBlock::BITS;
        Some(max - block_idx * SimdBlock::BITS - inner - leading - 1)
    }
367 | | |
    /// `true` if all bits in the [`FixedBitSet`] are set.
    ///
    /// ```
    /// # use fixedbitset::FixedBitSet;
    /// let mut bitset = FixedBitSet::with_capacity(10);
    /// assert!(!bitset.is_full());
    ///
    /// bitset.insert_range(..);
    /// assert!(bitset.is_full());
    /// ```
    ///
    /// This is equivalent to [`bitset.count_ones(..) == bitset.len()`](FixedBitSet::count_ones).
    #[inline]
    pub fn is_full(&self) -> bool {
        // Delegates to the masked range check over the full range.
        self.contains_all_in_range(..)
    }
384 | | |
385 | | /// Return **true** if the bit is enabled in the **FixedBitSet**, |
386 | | /// **false** otherwise. |
387 | | /// |
388 | | /// Note: bits outside the capacity are always disabled. |
389 | | /// |
390 | | /// Note: Also available with index syntax: `bitset[bit]`. |
391 | | #[inline] |
392 | 0 | pub fn contains(&self, bit: usize) -> bool { |
393 | 0 | (bit < self.length) |
394 | | // SAFETY: The above check ensures that the block and bit are within bounds. |
395 | 0 | .then(|| unsafe { self.contains_unchecked(bit) }) |
396 | 0 | .unwrap_or(false) |
397 | 0 | } |
398 | | |
    /// Return **true** if the bit is enabled in the **FixedBitSet**,
    /// **false** otherwise.
    ///
    /// Note: unlike `contains`, calling this with an invalid `bit`
    /// is undefined behavior.
    ///
    /// # Safety
    /// `bit` must be less than `self.len()`
    #[inline]
    pub unsafe fn contains_unchecked(&self, bit: usize) -> bool {
        // Split into word index and bit offset within the word.
        let (block, i) = div_rem(bit, BITS);
        (self.get_unchecked(block) & (1 << i)) != 0
    }
412 | | |
413 | | /// Clear all bits. |
414 | | #[inline] |
415 | 0 | pub fn clear(&mut self) { |
416 | 0 | for elt in self.as_mut_simd_slice().iter_mut() { |
417 | 0 | *elt = SimdBlock::NONE |
418 | | } |
419 | 0 | } |
420 | | |
421 | | /// Enable `bit`. |
422 | | /// |
423 | | /// **Panics** if **bit** is out of bounds. |
424 | | #[inline] |
425 | 0 | pub fn insert(&mut self, bit: usize) { |
426 | 0 | assert!( |
427 | 0 | bit < self.length, |
428 | 0 | "insert at index {} exceeds fixedbitset size {}", |
429 | | bit, |
430 | | self.length |
431 | | ); |
432 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
433 | 0 | unsafe { |
434 | 0 | self.insert_unchecked(bit); |
435 | 0 | } |
436 | 0 | } |
437 | | |
    /// Enable `bit` without any length checks.
    ///
    /// # Safety
    /// `bit` must be less than `self.len()`
    #[inline]
    pub unsafe fn insert_unchecked(&mut self, bit: usize) {
        let (block, i) = div_rem(bit, BITS);
        // SAFETY: Caller guarantees `bit < self.len()`, so the word is in bounds.
        unsafe {
            *self.get_unchecked_mut(block) |= 1 << i;
        }
    }
450 | | |
451 | | /// Disable `bit`. |
452 | | /// |
453 | | /// **Panics** if **bit** is out of bounds. |
454 | | #[inline] |
455 | 0 | pub fn remove(&mut self, bit: usize) { |
456 | 0 | assert!( |
457 | 0 | bit < self.length, |
458 | 0 | "remove at index {} exceeds fixedbitset size {}", |
459 | | bit, |
460 | | self.length |
461 | | ); |
462 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
463 | 0 | unsafe { |
464 | 0 | self.remove_unchecked(bit); |
465 | 0 | } |
466 | 0 | } |
467 | | |
    /// Disable `bit` without any bounds checking.
    ///
    /// # Safety
    /// `bit` must be less than `self.len()`
    #[inline]
    pub unsafe fn remove_unchecked(&mut self, bit: usize) {
        let (block, i) = div_rem(bit, BITS);
        // SAFETY: Caller guarantees `bit < self.len()`, so the word is in bounds.
        unsafe {
            *self.get_unchecked_mut(block) &= !(1 << i);
        }
    }
480 | | |
481 | | /// Enable `bit`, and return its previous value. |
482 | | /// |
483 | | /// **Panics** if **bit** is out of bounds. |
484 | | #[inline] |
485 | 0 | pub fn put(&mut self, bit: usize) -> bool { |
486 | 0 | assert!( |
487 | 0 | bit < self.length, |
488 | 0 | "put at index {} exceeds fixedbitset size {}", |
489 | | bit, |
490 | | self.length |
491 | | ); |
492 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
493 | 0 | unsafe { self.put_unchecked(bit) } |
494 | 0 | } |
495 | | |
    /// Enable `bit`, and return its previous value without doing any bounds checking.
    ///
    /// # Safety
    /// `bit` must be less than `self.len()`
    #[inline]
    pub unsafe fn put_unchecked(&mut self, bit: usize) -> bool {
        let (block, i) = div_rem(bit, BITS);
        // SAFETY: Caller guarantees `bit < self.len()`, so the word is in bounds.
        unsafe {
            let word = self.get_unchecked_mut(block);
            // Read the old state before setting the bit.
            let prev = *word & (1 << i) != 0;
            *word |= 1 << i;
            prev
        }
    }
511 | | |
512 | | /// Toggle `bit` (inverting its state). |
513 | | /// |
514 | | /// ***Panics*** if **bit** is out of bounds |
515 | | #[inline] |
516 | 0 | pub fn toggle(&mut self, bit: usize) { |
517 | 0 | assert!( |
518 | 0 | bit < self.length, |
519 | 0 | "toggle at index {} exceeds fixedbitset size {}", |
520 | | bit, |
521 | | self.length |
522 | | ); |
523 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
524 | 0 | unsafe { |
525 | 0 | self.toggle_unchecked(bit); |
526 | 0 | } |
527 | 0 | } |
528 | | |
    /// Toggle `bit` (inverting its state) without any bounds checking.
    ///
    /// # Safety
    /// `bit` must be less than `self.len()`
    #[inline]
    pub unsafe fn toggle_unchecked(&mut self, bit: usize) {
        let (block, i) = div_rem(bit, BITS);
        // SAFETY: Caller guarantees `bit < self.len()`, so the word is in bounds.
        unsafe {
            *self.get_unchecked_mut(block) ^= 1 << i;
        }
    }
541 | | |
542 | | /// Sets a bit to the provided `enabled` value. |
543 | | /// |
544 | | /// **Panics** if **bit** is out of bounds. |
545 | | #[inline] |
546 | 0 | pub fn set(&mut self, bit: usize, enabled: bool) { |
547 | 0 | assert!( |
548 | 0 | bit < self.length, |
549 | 0 | "set at index {} exceeds fixedbitset size {}", |
550 | | bit, |
551 | | self.length |
552 | | ); |
553 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
554 | 0 | unsafe { |
555 | 0 | self.set_unchecked(bit, enabled); |
556 | 0 | } |
557 | 0 | } |
558 | | |
559 | | /// Sets a bit to the provided `enabled` value without doing any bounds checking. |
560 | | /// |
561 | | /// # Safety |
562 | | /// `bit` must be less than `self.len()` |
563 | | #[inline] |
564 | 0 | pub unsafe fn set_unchecked(&mut self, bit: usize, enabled: bool) { |
565 | 0 | let (block, i) = div_rem(bit, BITS); |
566 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
567 | 0 | let elt = unsafe { self.get_unchecked_mut(block) }; |
568 | 0 | if enabled { |
569 | 0 | *elt |= 1 << i; |
570 | 0 | } else { |
571 | 0 | *elt &= !(1 << i); |
572 | 0 | } |
573 | 0 | } |
574 | | |
575 | | /// Copies boolean value from specified bit to the specified bit. |
576 | | /// |
577 | | /// If `from` is out-of-bounds, `to` will be unset. |
578 | | /// |
579 | | /// **Panics** if **to** is out of bounds. |
580 | | #[inline] |
581 | 0 | pub fn copy_bit(&mut self, from: usize, to: usize) { |
582 | 0 | assert!( |
583 | 0 | to < self.length, |
584 | 0 | "copy to index {} exceeds fixedbitset size {}", |
585 | | to, |
586 | | self.length |
587 | | ); |
588 | 0 | let enabled = self.contains(from); |
589 | | // SAFETY: The above assertion ensures that the block is inside the Vec's allocation. |
590 | 0 | unsafe { self.set_unchecked(to, enabled) }; |
591 | 0 | } |
592 | | |
    /// Copies boolean value from specified bit to the specified bit.
    ///
    /// Note: unlike `copy_bit`, calling this with an invalid `from`
    /// is undefined behavior.
    ///
    /// # Safety
    /// `to` must both be less than `self.len()`
    #[inline]
    pub unsafe fn copy_bit_unchecked(&mut self, from: usize, to: usize) {
        // SAFETY: Caller must ensure that `from` is within bounds.
        let enabled = self.contains_unchecked(from);
        // SAFETY: Caller must ensure that `to` is within bounds.
        self.set_unchecked(to, enabled);
    }
607 | | |
    /// Count the number of set bits in the given bit range.
    ///
    /// This function is potentially much faster than using `ones(other).count()`.
    /// Use `..` to count the whole content of the bitset.
    ///
    /// **Panics** if the range extends past the end of the bitset.
    #[inline]
    pub fn count_ones<T: IndexRange>(&self, range: T) -> usize {
        // Mask each word so bits outside `range` never reach the popcount.
        Self::batch_count_ones(Masks::new(range, self.length).map(|(block, mask)| {
            // SAFETY: Masks cannot return a block index that is out of range.
            unsafe { *self.get_unchecked(block) & mask }
        }))
    }
621 | | |
    /// Count the number of unset bits in the given bit range.
    ///
    /// This function is potentially much faster than using `zeroes(other).count()`.
    /// Use `..` to count the whole content of the bitset.
    ///
    /// **Panics** if the range extends past the end of the bitset.
    #[inline]
    pub fn count_zeroes<T: IndexRange>(&self, range: T) -> usize {
        // Complement each word, then mask so only bits inside `range` are counted.
        Self::batch_count_ones(Masks::new(range, self.length).map(|(block, mask)| {
            // SAFETY: Masks cannot return a block index that is out of range.
            unsafe { !*self.get_unchecked(block) & mask }
        }))
    }
635 | | |
636 | | /// Sets every bit in the given range to the given state (`enabled`) |
637 | | /// |
638 | | /// Use `..` to set the whole bitset. |
639 | | /// |
640 | | /// **Panics** if the range extends past the end of the bitset. |
641 | | #[inline] |
642 | 0 | pub fn set_range<T: IndexRange>(&mut self, range: T, enabled: bool) { |
643 | 0 | if enabled { |
644 | 0 | self.insert_range(range); |
645 | 0 | } else { |
646 | 0 | self.remove_range(range); |
647 | 0 | } |
648 | 0 | } |
649 | | |
    /// Enables every bit in the given range.
    ///
    /// Use `..` to make the whole bitset ones.
    ///
    /// **Panics** if the range extends past the end of the bitset.
    #[inline]
    pub fn insert_range<T: IndexRange>(&mut self, range: T) {
        // Masks yields (word index, mask of in-range bits) pairs; OR sets them.
        for (block, mask) in Masks::new(range, self.length) {
            // SAFETY: Masks cannot return a block index that is out of range.
            let block = unsafe { self.get_unchecked_mut(block) };
            *block |= mask;
        }
    }
663 | | |
    /// Disables every bit in the given range.
    ///
    /// Use `..` to make the whole bitset zeros.
    ///
    /// **Panics** if the range extends past the end of the bitset.
    #[inline]
    pub fn remove_range<T: IndexRange>(&mut self, range: T) {
        // Masks yields (word index, mask of in-range bits) pairs; AND-NOT clears them.
        for (block, mask) in Masks::new(range, self.length) {
            // SAFETY: Masks cannot return a block index that is out of range.
            let block = unsafe { self.get_unchecked_mut(block) };
            *block &= !mask;
        }
    }
677 | | |
    /// Toggles (inverts) every bit in the given range.
    ///
    /// Use `..` to toggle the whole bitset.
    ///
    /// **Panics** if the range extends past the end of the bitset.
    #[inline]
    pub fn toggle_range<T: IndexRange>(&mut self, range: T) {
        // Masks yields (word index, mask of in-range bits) pairs; XOR flips them.
        for (block, mask) in Masks::new(range, self.length) {
            // SAFETY: Masks cannot return a block index that is out of range.
            let block = unsafe { self.get_unchecked_mut(block) };
            *block ^= mask;
        }
    }
691 | | |
692 | | /// Checks if the bitset contains every bit in the given range. |
693 | | /// |
694 | | /// **Panics** if the range extends past the end of the bitset. |
695 | | #[inline] |
696 | 0 | pub fn contains_all_in_range<T: IndexRange>(&self, range: T) -> bool { |
697 | 0 | for (block, mask) in Masks::new(range, self.length) { |
698 | | // SAFETY: Masks cannot return a block index that is out of range. |
699 | 0 | let block = unsafe { self.get_unchecked(block) }; |
700 | 0 | if block & mask != mask { |
701 | 0 | return false; |
702 | 0 | } |
703 | | } |
704 | 0 | true |
705 | 0 | } |
706 | | |
707 | | /// Checks if the bitset contains at least one set bit in the given range. |
708 | | /// |
709 | | /// **Panics** if the range extends past the end of the bitset. |
710 | | #[inline] |
711 | 0 | pub fn contains_any_in_range<T: IndexRange>(&self, range: T) -> bool { |
712 | 0 | for (block, mask) in Masks::new(range, self.length) { |
713 | | // SAFETY: Masks cannot return a block index that is out of range. |
714 | 0 | let block = unsafe { self.get_unchecked(block) }; |
715 | 0 | if block & mask != 0 { |
716 | 0 | return true; |
717 | 0 | } |
718 | | } |
719 | 0 | false |
720 | 0 | } |
721 | | |
    /// View the bitset as a slice of `Block` blocks
    #[inline]
    pub fn as_slice(&self) -> &[Block] {
        // SAFETY: The bits from both usize and Block are required to be reinterprettable, and
        // neither have any padding or alignment issues. The slice constructed is within bounds
        // of the underlying allocation. This function is called with a read-only borrow so
        // no other write can happen as long as the returned borrow lives.
        unsafe {
            let ptr = self.data.as_ptr().cast::<Block>();
            core::slice::from_raw_parts(ptr, self.usize_len())
        }
    }
734 | | |
    /// View the bitset as a mutable slice of `Block` blocks. Writing past the bitlength in the last
    /// will cause `contains` to return potentially incorrect results for bits past the bitlength.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [Block] {
        // SAFETY: The bits from both usize and Block are required to be reinterprettable, and
        // neither have any padding or alignment issues. The slice constructed is within bounds
        // of the underlying allocation. This function is called with a mutable borrow so
        // no other read or write can happen as long as the returned borrow lives.
        unsafe {
            let ptr = self.data.as_ptr().cast::<Block>();
            core::slice::from_raw_parts_mut(ptr, self.usize_len())
        }
    }
748 | | |
    /// Iterates over all enabled bits.
    ///
    /// Iterator element is the index of the `1` bit, type `usize`.
    #[inline]
    pub fn ones(&self) -> Ones {
        match self.as_slice().split_first() {
            Some((&first_block, rem)) => {
                // Pre-load the first and last words so the double-ended iterator
                // can consume bits from both ends; `rem` holds the middle words.
                let (&last_block, rem) = rem.split_last().unwrap_or((&0, rem));
                Ones {
                    bitset_front: first_block,
                    bitset_back: last_block,
                    block_idx_front: 0,
                    // Bit offset of the back word: everything before it.
                    block_idx_back: (1 + rem.len()) * BITS,
                    remaining_blocks: rem.iter(),
                }
            }
            // Empty bitset: an iterator that immediately terminates.
            None => Ones {
                bitset_front: 0,
                bitset_back: 0,
                block_idx_front: 0,
                block_idx_back: 0,
                remaining_blocks: [].iter(),
            },
        }
    }
774 | | |
    /// Iterates over all enabled bits.
    ///
    /// Iterator element is the index of the `1` bit, type `usize`.
    /// Unlike `ones`, this function consumes the `FixedBitset`.
    pub fn into_ones(self) -> IntoOnes {
        let ptr = self.data.as_ptr().cast();
        // Length in usize words, counted over whole SIMD blocks.
        let len = self.simd_block_len() * SimdBlock::USIZE_COUNT;
        // SAFETY:
        // - ptr comes from self.data, so it is valid;
        // - self.data is valid for self.data.len() SimdBlocks,
        //   which is exactly self.data.len() * SimdBlock::USIZE_COUNT usizes;
        // - we will keep this slice around only as long as self.data is,
        //   so it won't become dangling.
        let slice = unsafe { core::slice::from_raw_parts(ptr, len) };
        // Reclaim the buffer as a Vec so the iterator can own (and later drop) it.
        // SAFETY: The data pointer and capacity were created from a Vec initially. The block
        // len is identical to that of the original.
        let data: Vec<SimdBlock> = unsafe {
            Vec::from_raw_parts(
                self.data.as_ptr().cast(),
                self.simd_block_len(),
                self.capacity,
            )
        };
        let mut iter = slice.iter().copied();

        // `data` now owns the allocation; forget `self` so it is not freed twice.
        core::mem::forget(self);

        IntoOnes {
            // Pre-load front and back words for double-ended iteration.
            bitset_front: iter.next().unwrap_or(0),
            bitset_back: iter.next_back().unwrap_or(0),
            block_idx_front: 0,
            block_idx_back: len.saturating_sub(1) * BITS,
            remaining_blocks: iter,
            // Keeps the allocation alive for as long as the iterator lives.
            _buf: data,
        }
    }
811 | | |
812 | | /// Iterates over all disabled bits. |
813 | | /// |
814 | | /// Iterator element is the index of the `0` bit, type `usize`. |
815 | | #[inline] |
816 | 0 | pub fn zeroes(&self) -> Zeroes { |
817 | 0 | match self.as_slice().split_first() { |
818 | 0 | Some((&block, rem)) => Zeroes { |
819 | 0 | bitset: !block, |
820 | 0 | block_idx: 0, |
821 | 0 | len: self.len(), |
822 | 0 | remaining_blocks: rem.iter(), |
823 | 0 | }, |
824 | 0 | None => Zeroes { |
825 | 0 | bitset: !0, |
826 | 0 | block_idx: 0, |
827 | 0 | len: self.len(), |
828 | 0 | remaining_blocks: [].iter(), |
829 | 0 | }, |
830 | | } |
831 | 0 | } |
832 | | |
833 | | /// Returns a lazy iterator over the intersection of two `FixedBitSet`s |
834 | 0 | pub fn intersection<'a>(&'a self, other: &'a FixedBitSet) -> Intersection<'a> { |
835 | 0 | Intersection { |
836 | 0 | iter: self.ones(), |
837 | 0 | other, |
838 | 0 | } |
839 | 0 | } |
840 | | |
841 | | /// Returns a lazy iterator over the union of two `FixedBitSet`s. |
842 | 0 | pub fn union<'a>(&'a self, other: &'a FixedBitSet) -> Union<'a> { |
843 | 0 | Union { |
844 | 0 | iter: self.ones().chain(other.difference(self)), |
845 | 0 | } |
846 | 0 | } |
847 | | |
848 | | /// Returns a lazy iterator over the difference of two `FixedBitSet`s. The difference of `a` |
849 | | /// and `b` is the elements of `a` which are not in `b`. |
850 | 0 | pub fn difference<'a>(&'a self, other: &'a FixedBitSet) -> Difference<'a> { |
851 | 0 | Difference { |
852 | 0 | iter: self.ones(), |
853 | 0 | other, |
854 | 0 | } |
855 | 0 | } |
856 | | |
857 | | /// Returns a lazy iterator over the symmetric difference of two `FixedBitSet`s. |
858 | | /// The symmetric difference of `a` and `b` is the elements of one, but not both, sets. |
859 | 0 | pub fn symmetric_difference<'a>(&'a self, other: &'a FixedBitSet) -> SymmetricDifference<'a> { |
860 | 0 | SymmetricDifference { |
861 | 0 | iter: self.difference(other).chain(other.difference(self)), |
862 | 0 | } |
863 | 0 | } |
864 | | |
865 | | /// In-place union of two `FixedBitSet`s. |
866 | | /// |
867 | | /// On calling this method, `self`'s capacity may be increased to match `other`'s. |
868 | 0 | pub fn union_with(&mut self, other: &FixedBitSet) { |
869 | 0 | if other.len() >= self.len() { |
870 | 0 | self.grow(other.len()); |
871 | 0 | } |
872 | 0 | self.as_mut_simd_slice() |
873 | 0 | .iter_mut() |
874 | 0 | .zip(other.as_simd_slice().iter()) |
875 | 0 | .for_each(|(x, y)| *x |= *y); |
876 | 0 | } |
877 | | |
878 | | /// In-place intersection of two `FixedBitSet`s. |
879 | | /// |
880 | | /// On calling this method, `self`'s capacity will remain the same as before. |
881 | 0 | pub fn intersect_with(&mut self, other: &FixedBitSet) { |
882 | 0 | let me = self.as_mut_simd_slice(); |
883 | 0 | let other = other.as_simd_slice(); |
884 | 0 | me.iter_mut().zip(other.iter()).for_each(|(x, y)| { |
885 | 0 | *x &= *y; |
886 | 0 | }); |
887 | 0 | let mn = core::cmp::min(me.len(), other.len()); |
888 | 0 | for wd in &mut me[mn..] { |
889 | 0 | *wd = SimdBlock::NONE; |
890 | 0 | } |
891 | 0 | } |
892 | | |
893 | | /// In-place difference of two `FixedBitSet`s. |
894 | | /// |
895 | | /// On calling this method, `self`'s capacity will remain the same as before. |
896 | 0 | pub fn difference_with(&mut self, other: &FixedBitSet) { |
897 | 0 | self.as_mut_simd_slice() |
898 | 0 | .iter_mut() |
899 | 0 | .zip(other.as_simd_slice().iter()) |
900 | 0 | .for_each(|(x, y)| { |
901 | 0 | *x &= !*y; |
902 | 0 | }); |
903 | | |
904 | | // There's no need to grow self or do any other adjustments. |
905 | | // |
906 | | // * If self is longer than other, the bits at the end of self won't be affected since other |
907 | | // has them implicitly set to 0. |
908 | | // * If other is longer than self, the bits at the end of other are irrelevant since self |
909 | | // has them set to 0 anyway. |
910 | 0 | } |
911 | | |
912 | | /// In-place symmetric difference of two `FixedBitSet`s. |
913 | | /// |
914 | | /// On calling this method, `self`'s capacity may be increased to match `other`'s. |
915 | 0 | pub fn symmetric_difference_with(&mut self, other: &FixedBitSet) { |
916 | 0 | if other.len() >= self.len() { |
917 | 0 | self.grow(other.len()); |
918 | 0 | } |
919 | 0 | self.as_mut_simd_slice() |
920 | 0 | .iter_mut() |
921 | 0 | .zip(other.as_simd_slice().iter()) |
922 | 0 | .for_each(|(x, y)| { |
923 | 0 | *x ^= *y; |
924 | 0 | }); |
925 | 0 | } |
926 | | |
927 | | /// Computes how many bits would be set in the union between two bitsets. |
928 | | /// |
929 | | /// This is potentially much faster than using `union(other).count()`. Unlike |
930 | | /// other methods like using [`union_with`] followed by [`count_ones`], this |
931 | | /// does not mutate in place or require separate allocations. |
932 | | #[inline] |
933 | 0 | pub fn union_count(&self, other: &FixedBitSet) -> usize { |
934 | 0 | let me = self.as_slice(); |
935 | 0 | let other = other.as_slice(); |
936 | 0 | let count = Self::batch_count_ones(me.iter().zip(other.iter()).map(|(x, y)| (*x | *y))); |
937 | 0 | match other.len().cmp(&me.len()) { |
938 | 0 | Ordering::Greater => count + Self::batch_count_ones(other[me.len()..].iter().copied()), |
939 | 0 | Ordering::Less => count + Self::batch_count_ones(me[other.len()..].iter().copied()), |
940 | 0 | Ordering::Equal => count, |
941 | | } |
942 | 0 | } |
943 | | |
944 | | /// Computes how many bits would be set in the intersection between two bitsets. |
945 | | /// |
946 | | /// This is potentially much faster than using `intersection(other).count()`. Unlike |
947 | | /// other methods like using [`intersect_with`] followed by [`count_ones`], this |
948 | | /// does not mutate in place or require separate allocations. |
949 | | #[inline] |
950 | 0 | pub fn intersection_count(&self, other: &FixedBitSet) -> usize { |
951 | 0 | Self::batch_count_ones( |
952 | 0 | self.as_slice() |
953 | 0 | .iter() |
954 | 0 | .zip(other.as_slice()) |
955 | 0 | .map(|(x, y)| (*x & *y)), |
956 | | ) |
957 | 0 | } |
958 | | |
959 | | /// Computes how many bits would be set in the difference between two bitsets. |
960 | | /// |
961 | | /// This is potentially much faster than using `difference(other).count()`. Unlike |
962 | | /// other methods like using [`difference_with`] followed by [`count_ones`], this |
963 | | /// does not mutate in place or require separate allocations. |
964 | | #[inline] |
965 | 0 | pub fn difference_count(&self, other: &FixedBitSet) -> usize { |
966 | 0 | Self::batch_count_ones( |
967 | 0 | self.as_slice() |
968 | 0 | .iter() |
969 | 0 | .zip(other.as_slice().iter()) |
970 | 0 | .map(|(x, y)| (*x & !*y)), |
971 | | ) |
972 | 0 | } |
973 | | |
974 | | /// Computes how many bits would be set in the symmetric difference between two bitsets. |
975 | | /// |
976 | | /// This is potentially much faster than using `symmetric_difference(other).count()`. Unlike |
977 | | /// other methods like using [`symmetric_difference_with`] followed by [`count_ones`], this |
978 | | /// does not mutate in place or require separate allocations. |
979 | | #[inline] |
980 | 0 | pub fn symmetric_difference_count(&self, other: &FixedBitSet) -> usize { |
981 | 0 | let me = self.as_slice(); |
982 | 0 | let other = other.as_slice(); |
983 | 0 | let count = Self::batch_count_ones(me.iter().zip(other.iter()).map(|(x, y)| (*x ^ *y))); |
984 | 0 | match other.len().cmp(&me.len()) { |
985 | 0 | Ordering::Greater => count + Self::batch_count_ones(other[me.len()..].iter().copied()), |
986 | 0 | Ordering::Less => count + Self::batch_count_ones(me[other.len()..].iter().copied()), |
987 | 0 | Ordering::Equal => count, |
988 | | } |
989 | 0 | } |
990 | | |
991 | | /// Returns `true` if `self` has no elements in common with `other`. This |
992 | | /// is equivalent to checking for an empty intersection. |
993 | 0 | pub fn is_disjoint(&self, other: &FixedBitSet) -> bool { |
994 | 0 | self.as_simd_slice() |
995 | 0 | .iter() |
996 | 0 | .zip(other.as_simd_slice()) |
997 | 0 | .all(|(x, y)| (*x & *y).is_empty()) |
998 | 0 | } |
999 | | |
1000 | | /// Returns `true` if the set is a subset of another, i.e. `other` contains |
1001 | | /// at least all the values in `self`. |
1002 | 0 | pub fn is_subset(&self, other: &FixedBitSet) -> bool { |
1003 | 0 | let me = self.as_simd_slice(); |
1004 | 0 | let other = other.as_simd_slice(); |
1005 | 0 | me.iter() |
1006 | 0 | .zip(other.iter()) |
1007 | 0 | .all(|(x, y)| x.andnot(*y).is_empty()) |
1008 | 0 | && me.iter().skip(other.len()).all(|x| x.is_empty()) |
1009 | 0 | } |
1010 | | |
1011 | | /// Returns `true` if the set is a superset of another, i.e. `self` contains |
1012 | | /// at least all the values in `other`. |
1013 | 0 | pub fn is_superset(&self, other: &FixedBitSet) -> bool { |
1014 | 0 | other.is_subset(self) |
1015 | 0 | } |
1016 | | } |
1017 | | |
1018 | | impl Hash for FixedBitSet { |
1019 | 0 | fn hash<H: core::hash::Hasher>(&self, state: &mut H) { |
1020 | 0 | self.length.hash(state); |
1021 | 0 | self.as_simd_slice().hash(state); |
1022 | 0 | } |
1023 | | } |
1024 | | |
1025 | | impl PartialEq for FixedBitSet { |
1026 | 0 | fn eq(&self, other: &Self) -> bool { |
1027 | 0 | self.length == other.length && self.as_simd_slice().eq(other.as_simd_slice()) |
1028 | 0 | } |
1029 | | } |
1030 | | |
1031 | | impl PartialOrd for FixedBitSet { |
1032 | 0 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
1033 | 0 | Some(self.cmp(other)) |
1034 | 0 | } |
1035 | | } |
1036 | | |
1037 | | impl Ord for FixedBitSet { |
1038 | 0 | fn cmp(&self, other: &Self) -> Ordering { |
1039 | 0 | self.length |
1040 | 0 | .cmp(&other.length) |
1041 | 0 | .then_with(|| self.as_simd_slice().cmp(other.as_simd_slice())) |
1042 | 0 | } |
1043 | | } |
1044 | | |
1045 | | impl Default for FixedBitSet { |
1046 | 0 | fn default() -> Self { |
1047 | 0 | Self::new() |
1048 | 0 | } |
1049 | | } |
1050 | | |
impl Drop for FixedBitSet {
    /// Rebuilds the owning `Vec` from the raw parts held on `self` and drops
    /// it, releasing the backing allocation.
    fn drop(&mut self) {
        // SAFETY: The data pointer and capacity were created from a Vec initially. The block
        // len is identical to that of the original.
        drop(unsafe {
            Vec::from_raw_parts(self.data.as_ptr(), self.simd_block_len(), self.capacity)
        });
    }
}
1060 | | |
1061 | | impl Binary for FixedBitSet { |
1062 | 0 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { |
1063 | 0 | if f.alternate() { |
1064 | 0 | f.write_str("0b")?; |
1065 | 0 | } |
1066 | | |
1067 | 0 | for i in 0..self.length { |
1068 | 0 | if self[i] { |
1069 | 0 | f.write_char('1')?; |
1070 | | } else { |
1071 | 0 | f.write_char('0')?; |
1072 | | } |
1073 | | } |
1074 | | |
1075 | 0 | Ok(()) |
1076 | 0 | } |
1077 | | } |
1078 | | |
1079 | | impl Display for FixedBitSet { |
1080 | 0 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { |
1081 | 0 | Binary::fmt(&self, f) |
1082 | 0 | } |
1083 | | } |
1084 | | |
/// An iterator producing elements in the difference of two sets.
///
/// This struct is created by the [`FixedBitSet::difference`] method.
pub struct Difference<'a> {
    // Ones of the left-hand set; candidates are filtered against `other`.
    iter: Ones<'a>,
    // The set whose members are excluded from the output.
    other: &'a FixedBitSet,
}
1092 | | |
1093 | | impl<'a> Iterator for Difference<'a> { |
1094 | | type Item = usize; |
1095 | | |
1096 | | #[inline] |
1097 | 0 | fn next(&mut self) -> Option<Self::Item> { |
1098 | 0 | self.iter.by_ref().find(|&nxt| !self.other.contains(nxt)) |
1099 | 0 | } |
1100 | | |
1101 | | #[inline] |
1102 | 0 | fn size_hint(&self) -> (usize, Option<usize>) { |
1103 | 0 | self.iter.size_hint() |
1104 | 0 | } |
1105 | | } |
1106 | | |
1107 | | impl<'a> DoubleEndedIterator for Difference<'a> { |
1108 | 0 | fn next_back(&mut self) -> Option<Self::Item> { |
1109 | 0 | self.iter |
1110 | 0 | .by_ref() |
1111 | 0 | .rev() |
1112 | 0 | .find(|&nxt| !self.other.contains(nxt)) |
1113 | 0 | } |
1114 | | } |
1115 | | |
1116 | | // Difference will continue to return None once it first returns None. |
1117 | | impl<'a> FusedIterator for Difference<'a> {} |
1118 | | |
/// An iterator producing elements in the symmetric difference of two sets.
///
/// This struct is created by the [`FixedBitSet::symmetric_difference`] method.
pub struct SymmetricDifference<'a> {
    // (a \ b) chained with (b \ a); all work happens in the two Differences.
    iter: Chain<Difference<'a>, Difference<'a>>,
}
1125 | | |
impl<'a> Iterator for SymmetricDifference<'a> {
    type Item = usize;

    // The chained Difference iterators do all the work; these are plain
    // delegations.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a> DoubleEndedIterator for SymmetricDifference<'a> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back()
    }
}

// SymmetricDifference will continue to return None once it first returns None.
impl<'a> FusedIterator for SymmetricDifference<'a> {}
1148 | | |
/// An iterator producing elements in the intersection of two sets.
///
/// This struct is created by the [`FixedBitSet::intersection`] method.
pub struct Intersection<'a> {
    // Ones of the left-hand set; candidates are kept only if `other` has them.
    iter: Ones<'a>,
    other: &'a FixedBitSet,
}
1156 | | |
1157 | | impl<'a> Iterator for Intersection<'a> { |
1158 | | type Item = usize; // the bit position of the '1' |
1159 | | |
1160 | | #[inline] |
1161 | 0 | fn next(&mut self) -> Option<Self::Item> { |
1162 | 0 | self.iter.by_ref().find(|&nxt| self.other.contains(nxt)) |
1163 | 0 | } |
1164 | | |
1165 | | #[inline] |
1166 | 0 | fn size_hint(&self) -> (usize, Option<usize>) { |
1167 | 0 | self.iter.size_hint() |
1168 | 0 | } |
1169 | | } |
1170 | | |
1171 | | impl<'a> DoubleEndedIterator for Intersection<'a> { |
1172 | 0 | fn next_back(&mut self) -> Option<Self::Item> { |
1173 | 0 | self.iter |
1174 | 0 | .by_ref() |
1175 | 0 | .rev() |
1176 | 0 | .find(|&nxt| self.other.contains(nxt)) |
1177 | 0 | } |
1178 | | } |
1179 | | |
1180 | | // Intersection will continue to return None once it first returns None. |
1181 | | impl<'a> FusedIterator for Intersection<'a> {} |
1182 | | |
/// An iterator producing elements in the union of two sets.
///
/// This struct is created by the [`FixedBitSet::union`] method.
pub struct Union<'a> {
    // `a.ones()` chained with `b.difference(a)`, so each bit appears once.
    iter: Chain<Ones<'a>, Difference<'a>>,
}
1189 | | |
impl<'a> Iterator for Union<'a> {
    type Item = usize;

    // The chained iterators do all the work; these are plain delegations.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a> DoubleEndedIterator for Union<'a> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back()
    }
}

// Union will continue to return None once it first returns None.
impl<'a> FusedIterator for Union<'a> {}
1212 | | |
// Internal iterator yielding `(block_index, mask)` pairs that together cover
// a bit range handed to `Masks::new`.
struct Masks {
    // Index of the first block touched by the range.
    first_block: usize,
    // Bits of `first_block` that fall inside the range.
    first_mask: usize,
    // Index of the block containing the range's end.
    last_block: usize,
    // Bits of `last_block` that fall inside the range.
    last_mask: usize,
}
1219 | | |
impl Masks {
    /// Builds the block/mask cursor covering `range` within a bitset of
    /// `length` bits.
    ///
    /// # Panics
    /// Panics when the range is decreasing or extends past `length`.
    #[inline]
    fn new<T: IndexRange>(range: T, length: usize) -> Masks {
        // Unbounded start means 0; unbounded end means the whole set.
        let start = range.start().unwrap_or(0);
        let end = range.end().unwrap_or(length);
        assert!(
            start <= end && end <= length,
            "invalid range {}..{} for a fixedbitset of size {}",
            start,
            end,
            length
        );

        let (first_block, first_rem) = div_rem(start, BITS);
        let (last_block, last_rem) = div_rem(end, BITS);

        Masks {
            first_block,
            // Bits at positions >= first_rem within the first block.
            first_mask: usize::MAX << first_rem,
            last_block,
            // Bits at positions < last_rem within the last block.
            last_mask: (usize::MAX >> 1) >> (BITS - last_rem - 1),
            // this is equivalent to `MAX >> (BITS - x)` with correct semantics when x == 0.
        }
    }
}
1245 | | |
impl Iterator for Masks {
    type Item = (usize, usize);

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        match self.first_block.cmp(&self.last_block) {
            Ordering::Less => {
                // Interior block: emit the current mask, then switch to a full
                // mask for every block before the last one.
                let res = (self.first_block, self.first_mask);
                self.first_block += 1;
                self.first_mask = !0;
                Some(res)
            }
            Ordering::Equal => {
                // Final block: both masks apply. An empty intersection means
                // the range contributes no bits in this block.
                let mask = self.first_mask & self.last_mask;
                let res = if mask == 0 {
                    None
                } else {
                    Some((self.first_block, mask))
                };
                // Advancing past last_block makes every later call hit the
                // Greater arm and return None.
                self.first_block += 1;
                res
            }
            Ordering::Greater => None,
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.first_block..=self.last_block).size_hint()
    }
}

// Masks will continue to return None once it first returns None.
impl FusedIterator for Masks {}

// Masks's size_hint implementation is exact. It never returns an
// unbounded value and always returns an exact number of values.
impl ExactSizeIterator for Masks {}
1284 | | |
/// An iterator producing the indices of the set bits in a set.
///
/// This struct is created by the [`FixedBitSet::ones`] method.
pub struct Ones<'a> {
    // Unconsumed bits of the frontmost block.
    bitset_front: usize,
    // Unconsumed bits of the backmost block.
    bitset_back: usize,
    // Bit offset of the front block (a multiple of BITS).
    block_idx_front: usize,
    // Bit offset of the back block (a multiple of BITS).
    block_idx_back: usize,
    // Blocks strictly between the front and back blocks.
    remaining_blocks: core::slice::Iter<'a, usize>,
}
1295 | | |
impl<'a> Ones<'a> {
    /// Returns the position of the lowest set bit of `*n` and clears that bit.
    ///
    /// Callers must ensure `*n != 0`: `*n - 1` underflows on zero (panics in
    /// debug builds).
    #[inline]
    pub fn last_positive_bit_and_unset(n: &mut usize) -> usize {
        // Find the last set bit using x & -x
        let last_bit = *n & n.wrapping_neg();

        // Find the position of the last set bit
        let position = last_bit.trailing_zeros();

        // Unset the last set bit
        *n &= *n - 1;

        position as usize
    }

    /// Returns the number of leading zeros of `*n` (i.e. the position of the
    /// highest set bit, counted from the top) and clears that bit.
    ///
    /// Callers must ensure `*n != 0`: with `leading_zeros() == BITS` the shift
    /// amount below overflows.
    #[inline]
    fn first_positive_bit_and_unset(n: &mut usize) -> usize {
        /* Identify the first non zero bit */
        let bit_idx = n.leading_zeros();

        /* set that bit to zero */
        let mask = !((1_usize) << (BITS as u32 - bit_idx - 1));
        n.bitand_assign(mask);

        bit_idx as usize
    }
}
1323 | | |
impl<'a> DoubleEndedIterator for Ones<'a> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Refill the back block from the middle iterator until a set bit appears.
        while self.bitset_back == 0 {
            match self.remaining_blocks.next_back() {
                None => {
                    // Middle exhausted: drain the front block from its high end.
                    if self.bitset_front != 0 {
                        self.bitset_back = 0;
                        self.block_idx_back = self.block_idx_front;
                        return Some(
                            self.block_idx_front + BITS
                                - Self::first_positive_bit_and_unset(&mut self.bitset_front)
                                - 1,
                        );
                    } else {
                        return None;
                    }
                }
                Some(next_block) => {
                    self.bitset_back = *next_block;
                    self.block_idx_back -= BITS;
                }
            };
        }

        // Convert leading-zero count into an absolute bit index.
        Some(
            self.block_idx_back - Self::first_positive_bit_and_unset(&mut self.bitset_back) + BITS
                - 1,
        )
    }
}
1354 | | |
impl<'a> Iterator for Ones<'a> {
    type Item = usize; // the bit position of the '1'

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Refill the front block from the middle iterator until a set bit appears.
        while self.bitset_front == 0 {
            match self.remaining_blocks.next() {
                Some(next_block) => {
                    self.bitset_front = *next_block;
                    self.block_idx_front += BITS;
                }
                None => {
                    // Middle exhausted: drain the back block from its low end.
                    if self.bitset_back != 0 {
                        // not needed for iteration, but for size_hint
                        self.block_idx_front = self.block_idx_back;
                        self.bitset_front = 0;

                        return Some(
                            self.block_idx_back
                                + Self::last_positive_bit_and_unset(&mut self.bitset_back),
                        );
                    } else {
                        return None;
                    }
                }
            };
        }

        Some(self.block_idx_front + Self::last_positive_bit_and_unset(&mut self.bitset_front))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Upper bound: everything between the two cursors plus the two
        // partially-consumed end blocks. The lower bound is 0 since the
        // remaining blocks may hold no set bits at all.
        (
            0,
            (Some(self.block_idx_back - self.block_idx_front + 2 * BITS)),
        )
    }
}

// Ones will continue to return None once it first returns None.
impl<'a> FusedIterator for Ones<'a> {}
1397 | | |
/// An iterator producing the indices of the unset bits in a set.
///
/// This struct is created by the [`FixedBitSet::zeroes`] method.
pub struct Zeroes<'a> {
    // Inverted bits of the current block: set bits here are zeroes in the set.
    bitset: usize,
    // Bit offset of the current block (a multiple of BITS).
    block_idx: usize,
    // Logical length of the bitset; zeroes at or past this index are not yielded.
    len: usize,
    // Blocks not yet visited.
    remaining_blocks: core::slice::Iter<'a, usize>,
}
1407 | | |
impl<'a> Iterator for Zeroes<'a> {
    type Item = usize; // the bit position of the '0'

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Blocks are stored inverted in `bitset`, so an all-zero value means
        // the current block had no zeroes left; pull and invert the next one.
        while self.bitset == 0 {
            self.bitset = !*self.remaining_blocks.next()?;
            self.block_idx += BITS;
        }
        // Isolate the lowest set bit (x & -x), locate it, then clear it.
        let t = self.bitset & (0_usize).wrapping_sub(self.bitset);
        let r = self.bitset.trailing_zeros() as usize;
        self.bitset ^= t;
        let bit = self.block_idx + r;
        // The remaining zeroes beyond the length of the bitset must be excluded.
        if bit < self.len {
            Some(bit)
        } else {
            None
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (0, Some(self.len))
    }
}

// Zeroes will stop returning Some when exhausted.
impl<'a> FusedIterator for Zeroes<'a> {}
1437 | | |
impl Clone for FixedBitSet {
    /// Builds an independent copy from the backing blocks and logical length.
    #[inline]
    fn clone(&self) -> Self {
        Self::from_blocks_and_len(Vec::from(self.as_simd_slice()), self.length)
    }

    /// Copies `source` into `self`, reusing `self`'s allocation when it is
    /// already large enough.
    #[inline]
    fn clone_from(&mut self, source: &Self) {
        if self.length < source.length {
            // SAFETY: `fill` is uninitialized, but is immediately initialized from `source`.
            unsafe { self.grow_inner(source.length, MaybeUninit::uninit()) };
        }
        let me = self.as_mut_simd_slice_uninit();
        let them = source.as_simd_slice_uninit();
        match me.len().cmp(&them.len()) {
            Ordering::Greater => {
                // `self` has surplus blocks; copy the shared prefix and clear the rest.
                let (head, tail) = me.split_at_mut(them.len());
                head.copy_from_slice(them);
                tail.fill(MaybeUninit::new(SimdBlock::NONE));
            }
            Ordering::Equal => me.copy_from_slice(them),
            // The grow_inner above ensures that self is at least as large as source.
            // so this branch is unreachable.
            Ordering::Less => {}
        }
        self.length = source.length;
    }
}
1466 | | |
1467 | | /// Return **true** if the bit is enabled in the bitset, |
1468 | | /// or **false** otherwise. |
1469 | | /// |
1470 | | /// Note: bits outside the capacity are always disabled, and thus |
1471 | | /// indexing a FixedBitSet will not panic. |
1472 | | impl Index<usize> for FixedBitSet { |
1473 | | type Output = bool; |
1474 | | |
1475 | | #[inline] |
1476 | 0 | fn index(&self, bit: usize) -> &bool { |
1477 | 0 | if self.contains(bit) { |
1478 | 0 | &true |
1479 | | } else { |
1480 | 0 | &false |
1481 | | } |
1482 | 0 | } |
1483 | | } |
1484 | | |
1485 | | /// Sets the bit at index **i** to **true** for each item **i** in the input **src**. |
1486 | | impl Extend<usize> for FixedBitSet { |
1487 | 0 | fn extend<I: IntoIterator<Item = usize>>(&mut self, src: I) { |
1488 | 0 | let iter = src.into_iter(); |
1489 | 0 | for i in iter { |
1490 | 0 | if i >= self.len() { |
1491 | 0 | self.grow(i + 1); |
1492 | 0 | } |
1493 | 0 | self.put(i); |
1494 | | } |
1495 | 0 | } |
1496 | | } |
1497 | | |
1498 | | /// Return a FixedBitSet containing bits set to **true** for every bit index in |
1499 | | /// the iterator, other bits are set to **false**. |
1500 | | impl FromIterator<usize> for FixedBitSet { |
1501 | 0 | fn from_iter<I: IntoIterator<Item = usize>>(src: I) -> Self { |
1502 | 0 | let mut fbs = FixedBitSet::with_capacity(0); |
1503 | 0 | fbs.extend(src); |
1504 | 0 | fbs |
1505 | 0 | } |
1506 | | } |
1507 | | |
/// An owning iterator producing the indices of the set bits in a set.
///
/// This struct is created by the [`FixedBitSet::into_ones`] method.
pub struct IntoOnes {
    // Unconsumed bits of the frontmost block.
    bitset_front: Block,
    // Unconsumed bits of the backmost block.
    bitset_back: Block,
    // Bit offset of the front block (a multiple of BITS).
    block_idx_front: usize,
    // Bit offset of the back block (a multiple of BITS).
    block_idx_back: usize,
    remaining_blocks: core::iter::Copied<core::slice::Iter<'static, usize>>,
    // Keep buf along so that `remaining_blocks` remains valid.
    _buf: Vec<SimdBlock>,
}
1517 | | |
impl IntoOnes {
    /// Returns the position of the lowest set bit of `*n` and clears that bit.
    ///
    /// Callers must ensure `*n != 0`: `*n - 1` underflows on zero (panics in
    /// debug builds).
    #[inline]
    pub fn last_positive_bit_and_unset(n: &mut Block) -> usize {
        // Find the last set bit using x & -x
        let last_bit = *n & n.wrapping_neg();

        // Find the position of the last set bit
        let position = last_bit.trailing_zeros();

        // Unset the last set bit
        *n &= *n - 1;

        position as usize
    }

    /// Returns the number of leading zeros of `*n` (i.e. the position of the
    /// highest set bit, counted from the top) and clears that bit.
    ///
    /// Callers must ensure `*n != 0`: with `leading_zeros() == BITS` the shift
    /// amount below overflows.
    #[inline]
    fn first_positive_bit_and_unset(n: &mut Block) -> usize {
        /* Identify the first non zero bit */
        let bit_idx = n.leading_zeros();

        /* set that bit to zero */
        let mask = !((1_usize) << (BITS as u32 - bit_idx - 1));
        n.bitand_assign(mask);

        bit_idx as usize
    }
}
1545 | | |
impl DoubleEndedIterator for IntoOnes {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Refill the back block from the middle iterator until a set bit appears.
        while self.bitset_back == 0 {
            match self.remaining_blocks.next_back() {
                None => {
                    // Middle exhausted: drain the front block from its high end.
                    if self.bitset_front != 0 {
                        self.bitset_back = 0;
                        self.block_idx_back = self.block_idx_front;
                        return Some(
                            self.block_idx_front + BITS
                                - Self::first_positive_bit_and_unset(&mut self.bitset_front)
                                - 1,
                        );
                    } else {
                        return None;
                    }
                }
                Some(next_block) => {
                    self.bitset_back = next_block;
                    self.block_idx_back -= BITS;
                }
            };
        }

        // Convert leading-zero count into an absolute bit index.
        Some(
            self.block_idx_back - Self::first_positive_bit_and_unset(&mut self.bitset_back) + BITS
                - 1,
        )
    }
}
1576 | | |
impl Iterator for IntoOnes {
    type Item = usize; // the bit position of the '1'

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Refill the front block from the middle iterator until a set bit appears.
        while self.bitset_front == 0 {
            match self.remaining_blocks.next() {
                Some(next_block) => {
                    self.bitset_front = next_block;
                    self.block_idx_front += BITS;
                }
                None => {
                    // Middle exhausted: drain the back block from its low end.
                    if self.bitset_back != 0 {
                        // not needed for iteration, but for size_hint
                        self.block_idx_front = self.block_idx_back;
                        self.bitset_front = 0;

                        return Some(
                            self.block_idx_back
                                + Self::last_positive_bit_and_unset(&mut self.bitset_back),
                        );
                    } else {
                        return None;
                    }
                }
            };
        }

        Some(self.block_idx_front + Self::last_positive_bit_and_unset(&mut self.bitset_front))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Upper bound: everything between the two cursors plus the two
        // partially-consumed end blocks. The lower bound is 0 since the
        // remaining blocks may hold no set bits at all.
        (
            0,
            (Some(self.block_idx_back - self.block_idx_front + 2 * BITS)),
        )
    }
}

// Ones will continue to return None once it first returns None.
impl FusedIterator for IntoOnes {}
1619 | | |
1620 | | impl<'a> BitAnd for &'a FixedBitSet { |
1621 | | type Output = FixedBitSet; |
1622 | 0 | fn bitand(self, other: &FixedBitSet) -> FixedBitSet { |
1623 | 0 | let (short, long) = { |
1624 | 0 | if self.len() <= other.len() { |
1625 | 0 | (self.as_simd_slice(), other.as_simd_slice()) |
1626 | | } else { |
1627 | 0 | (other.as_simd_slice(), self.as_simd_slice()) |
1628 | | } |
1629 | | }; |
1630 | 0 | let mut data = Vec::from(short); |
1631 | 0 | for (data, block) in data.iter_mut().zip(long.iter()) { |
1632 | 0 | *data &= *block; |
1633 | 0 | } |
1634 | 0 | let len = core::cmp::min(self.len(), other.len()); |
1635 | 0 | FixedBitSet::from_blocks_and_len(data, len) |
1636 | 0 | } |
1637 | | } |
1638 | | |
impl BitAndAssign for FixedBitSet {
    // `a &= b` (by value) delegates to the in-place intersection.
    fn bitand_assign(&mut self, other: Self) {
        self.intersect_with(&other);
    }
}

impl BitAndAssign<&Self> for FixedBitSet {
    // `a &= &b` delegates to the in-place intersection.
    fn bitand_assign(&mut self, other: &Self) {
        self.intersect_with(other);
    }
}
1650 | | |
1651 | | impl<'a> BitOr for &'a FixedBitSet { |
1652 | | type Output = FixedBitSet; |
1653 | 0 | fn bitor(self, other: &FixedBitSet) -> FixedBitSet { |
1654 | 0 | let (short, long) = { |
1655 | 0 | if self.len() <= other.len() { |
1656 | 0 | (self.as_simd_slice(), other.as_simd_slice()) |
1657 | | } else { |
1658 | 0 | (other.as_simd_slice(), self.as_simd_slice()) |
1659 | | } |
1660 | | }; |
1661 | 0 | let mut data = Vec::from(long); |
1662 | 0 | for (data, block) in data.iter_mut().zip(short.iter()) { |
1663 | 0 | *data |= *block; |
1664 | 0 | } |
1665 | 0 | let len = core::cmp::max(self.len(), other.len()); |
1666 | 0 | FixedBitSet::from_blocks_and_len(data, len) |
1667 | 0 | } |
1668 | | } |
1669 | | |
impl BitOrAssign for FixedBitSet {
    // `a |= b` (by value) delegates to the in-place union.
    fn bitor_assign(&mut self, other: Self) {
        self.union_with(&other);
    }
}

impl BitOrAssign<&Self> for FixedBitSet {
    // `a |= &b` delegates to the in-place union.
    fn bitor_assign(&mut self, other: &Self) {
        self.union_with(other);
    }
}
1681 | | |
1682 | | impl<'a> BitXor for &'a FixedBitSet { |
1683 | | type Output = FixedBitSet; |
1684 | 0 | fn bitxor(self, other: &FixedBitSet) -> FixedBitSet { |
1685 | 0 | let (short, long) = { |
1686 | 0 | if self.len() <= other.len() { |
1687 | 0 | (self.as_simd_slice(), other.as_simd_slice()) |
1688 | | } else { |
1689 | 0 | (other.as_simd_slice(), self.as_simd_slice()) |
1690 | | } |
1691 | | }; |
1692 | 0 | let mut data = Vec::from(long); |
1693 | 0 | for (data, block) in data.iter_mut().zip(short.iter()) { |
1694 | 0 | *data ^= *block; |
1695 | 0 | } |
1696 | 0 | let len = core::cmp::max(self.len(), other.len()); |
1697 | 0 | FixedBitSet::from_blocks_and_len(data, len) |
1698 | 0 | } |
1699 | | } |
1700 | | |
impl BitXorAssign for FixedBitSet {
    // `a ^= b` (by value) delegates to the in-place symmetric difference.
    fn bitxor_assign(&mut self, other: Self) {
        self.symmetric_difference_with(&other);
    }
}

impl BitXorAssign<&Self> for FixedBitSet {
    // `a ^= &b` delegates to the in-place symmetric difference.
    fn bitxor_assign(&mut self, other: &Self) {
        self.symmetric_difference_with(other);
    }
}