/rust/registry/src/index.crates.io-6f17d22bba15001f/bitvec-1.0.1/src/vec/ops.rs
//! Operator trait implementations for bit-vectors.

use core::{
	mem::ManuallyDrop,
	ops::{
		BitAnd,
		BitAndAssign,
		BitOr,
		BitOrAssign,
		BitXor,
		BitXorAssign,
		Deref,
		DerefMut,
		Index,
		IndexMut,
		Not,
	},
};

use wyz::comu::Mut;

use super::BitVec;
use crate::{
	order::BitOrder,
	ptr::BitSpan,
	slice::BitSlice,
	store::BitStore,
};

#[cfg(not(tarpaulin_include))]
impl<T, O> BitAndAssign<BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitand_assign(&mut self, rhs: BitVec<T, O>) {
		*self &= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O> BitAndAssign<&BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitand_assign(&mut self, rhs: &BitVec<T, O>) {
		*self &= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitAnd<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitAndAssign<Rhs>,
{
	type Output = Self;

	#[inline]
	fn bitand(mut self, rhs: Rhs) -> Self::Output {
		self &= rhs;
		self
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitAndAssign<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitAndAssign<Rhs>,
{
	#[inline]
	fn bitand_assign(&mut self, rhs: Rhs) {
		*self.as_mut_bitslice() &= rhs;
	}
}
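
// A usage sketch of the `BitAnd` family above (a hedged example, assuming the
// crate prelude and its `bitvec!` macro). `a & &b` forwards to the
// `BitSlice &= &BitVec` impl, then returns `a`:
//
//     use bitvec::prelude::*;
//
//     let a = bitvec![0, 0, 1, 1];
//     let b = bitvec![0, 1, 0, 1];
//     let c = a & &b;
//     assert_eq!(c, bitvec![0, 0, 0, 1]);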

#[cfg(not(tarpaulin_include))]
impl<T, O> BitOrAssign<BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitor_assign(&mut self, rhs: BitVec<T, O>) {
		*self |= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O> BitOrAssign<&BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitor_assign(&mut self, rhs: &BitVec<T, O>) {
		*self |= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitOr<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitOrAssign<Rhs>,
{
	type Output = Self;

	#[inline]
	fn bitor(mut self, rhs: Rhs) -> Self::Output {
		self |= rhs;
		self
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitOrAssign<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitOrAssign<Rhs>,
{
	#[inline]
	fn bitor_assign(&mut self, rhs: Rhs) {
		*self.as_mut_bitslice() |= rhs;
	}
}
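
// A usage sketch of the `BitOr` family (hedged; assumes the crate prelude).
// The `BitSlice`-receiver impls let a borrowed slice take a whole `BitVec` as
// the right-hand side of `|=`:
//
//     use bitvec::prelude::*;
//
//     let mut a = bitvec![0, 0, 1, 1];
//     let b = bitvec![0, 1, 0, 1];
//     *a.as_mut_bitslice() |= b;
//     assert_eq!(a, bitvec![0, 1, 1, 1]);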

#[cfg(not(tarpaulin_include))]
impl<T, O> BitXorAssign<BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitxor_assign(&mut self, rhs: BitVec<T, O>) {
		*self ^= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O> BitXorAssign<&BitVec<T, O>> for BitSlice<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn bitxor_assign(&mut self, rhs: &BitVec<T, O>) {
		*self ^= rhs.as_bitslice()
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitXor<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitXorAssign<Rhs>,
{
	type Output = Self;

	#[inline]
	fn bitxor(mut self, rhs: Rhs) -> Self::Output {
		self ^= rhs;
		self
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Rhs> BitXorAssign<Rhs> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: BitXorAssign<Rhs>,
{
	#[inline]
	fn bitxor_assign(&mut self, rhs: Rhs) {
		*self.as_mut_bitslice() ^= rhs;
	}
}
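
// A usage sketch of the `BitXor` family (hedged; assumes the crate prelude).
// `^=` on a `BitVec` forwards through `as_mut_bitslice`:
//
//     use bitvec::prelude::*;
//
//     let mut a = bitvec![0, 0, 1, 1];
//     let b = bitvec![0, 1, 0, 1];
//     a ^= &b;
//     assert_eq!(a, bitvec![0, 1, 1, 0]);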

impl<T, O> Deref for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	type Target = BitSlice<T, O>;

	#[inline]
	fn deref(&self) -> &Self::Target {
		self.as_bitslice()
	}
}

impl<T, O> DerefMut for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn deref_mut(&mut self) -> &mut Self::Target {
		self.as_mut_bitslice()
	}
}
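
// Because of these `Deref`/`DerefMut` impls, `BitSlice` methods are available
// directly on a `BitVec` (a hedged sketch, assuming the crate prelude):
//
//     use bitvec::prelude::*;
//
//     let mut v = bitvec![0, 1, 0, 1];
//     v.set(0, true);                 // `BitSlice::set` through `DerefMut`
//     assert_eq!(v.count_ones(), 3);  // `BitSlice::count_ones` through `Deref`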

impl<T, O> Drop for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	#[inline]
	fn drop(&mut self) {
		//  Empty bit-vectors use the dummy `EMPTY` span and own no heap
		//  allocation, so only a non-empty span needs to be torn down.
		if self.bitspan != BitSpan::<Mut, T, O>::EMPTY {
			//  Reassemble the underlying `Vec` and run its destructor.
			self.with_vec(|slot| unsafe { ManuallyDrop::drop(slot) });
		}
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Idx> Index<Idx> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: Index<Idx>,
{
	type Output = <BitSlice<T, O> as Index<Idx>>::Output;

	#[inline]
	fn index(&self, index: Idx) -> &Self::Output {
		&self.as_bitslice()[index]
	}
}

#[cfg(not(tarpaulin_include))]
impl<T, O, Idx> IndexMut<Idx> for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
	BitSlice<T, O>: IndexMut<Idx>,
{
	#[inline]
	fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
		&mut self.as_mut_bitslice()[index]
	}
}
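
// An indexing sketch (hedged; assumes the crate prelude). These impls only
// forward whatever index types `BitSlice` itself accepts: indexing by `usize`
// yields a `bool`, while `IndexMut` is limited to ranges, since a `&mut bool`
// projection into packed bits cannot exist (single-bit writes go through
// `BitSlice::set` instead):
//
//     use bitvec::prelude::*;
//
//     let mut v = bitvec![0, 1, 0, 1];
//     assert!(v[1]);
//     v[1..3].fill(true);
//     assert_eq!(v, bitvec![0, 1, 1, 1]);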

/** This implementation inverts all elements in the live buffer. You cannot rely
on the value of bits in the buffer that are outside the domain of
[`BitVec::as_mut_bitslice`].
**/
impl<T, O> Not for BitVec<T, O>
where
	T: BitStore,
	O: BitOrder,
{
	type Output = Self;

	#[inline]
	fn not(mut self) -> Self::Output {
		for elem in self.as_raw_mut_slice() {
			elem.store_value(!elem.load_value());
		}
		self
	}
}
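
// A usage sketch of `Not` (hedged; assumes the crate prelude). Dead bits in
// the final storage element are flipped too, as the doc comment above warns,
// but only the live bits participate in comparisons:
//
//     use bitvec::prelude::*;
//
//     let v = bitvec![0, 1, 0];
//     assert_eq!(!v, bitvec![1, 0, 1]);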