/rust/registry/src/index.crates.io-6f17d22bba15001f/bytes-1.10.1/src/bytes_mut.rs
Line | Count | Source (jump to first uncovered line) |
1 | | use core::iter::FromIterator; |
2 | | use core::mem::{self, ManuallyDrop, MaybeUninit}; |
3 | | use core::ops::{Deref, DerefMut}; |
4 | | use core::ptr::{self, NonNull}; |
5 | | use core::{cmp, fmt, hash, isize, slice, usize}; |
6 | | |
7 | | use alloc::{ |
8 | | borrow::{Borrow, BorrowMut}, |
9 | | boxed::Box, |
10 | | string::String, |
11 | | vec, |
12 | | vec::Vec, |
13 | | }; |
14 | | |
15 | | use crate::buf::{IntoIter, UninitSlice}; |
16 | | use crate::bytes::Vtable; |
17 | | #[allow(unused)] |
18 | | use crate::loom::sync::atomic::AtomicMut; |
19 | | use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; |
20 | | use crate::{offset_from, Buf, BufMut, Bytes, TryGetError}; |
21 | | |
22 | | /// A unique reference to a contiguous slice of memory. |
23 | | /// |
24 | | /// `BytesMut` represents a unique view into a potentially shared memory region. |
25 | | /// Given the uniqueness guarantee, owners of `BytesMut` handles are able to |
26 | | /// mutate the memory. |
27 | | /// |
28 | | /// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset |
29 | | /// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the |
30 | | /// same `buf` overlaps with its slice. That guarantee means that a write lock |
31 | | /// is not required. |
32 | | /// |
33 | | /// # Growth |
34 | | /// |
35 | | /// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as |
36 | | /// necessary. However, explicitly reserving the required space up-front before |
37 | | /// a series of inserts will be more efficient. |
38 | | /// |
39 | | /// # Examples |
40 | | /// |
41 | | /// ``` |
42 | | /// use bytes::{BytesMut, BufMut}; |
43 | | /// |
44 | | /// let mut buf = BytesMut::with_capacity(64); |
45 | | /// |
46 | | /// buf.put_u8(b'h'); |
47 | | /// buf.put_u8(b'e'); |
48 | | /// buf.put(&b"llo"[..]); |
49 | | /// |
50 | | /// assert_eq!(&buf[..], b"hello"); |
51 | | /// |
52 | | /// // Freeze the buffer so that it can be shared |
53 | | /// let a = buf.freeze(); |
54 | | /// |
55 | | /// // This does not allocate, instead `b` points to the same memory. |
56 | | /// let b = a.clone(); |
57 | | /// |
58 | | /// assert_eq!(&a[..], b"hello"); |
59 | | /// assert_eq!(&b[..], b"hello"); |
60 | | /// ``` |
61 | | pub struct BytesMut { |
62 | | ptr: NonNull<u8>, |
63 | | len: usize, |
64 | | cap: usize, |
65 | | data: *mut Shared, |
66 | | } |
67 | | |
68 | | // Thread-safe reference-counted container for the shared storage. This mostly |
69 | | // the same as `core::sync::Arc` but without the weak counter. The ref counting |
70 | | // fns are based on the ones found in `std`. |
71 | | // |
72 | | // The main reason to use `Shared` instead of `core::sync::Arc` is that it ends |
73 | | // up making the overall code simpler and easier to reason about. This is due to |
74 | | // some of the logic around setting `Inner::arc` and other ways the `arc` field |
75 | | // is used. Using `Arc` ended up requiring a number of funky transmutes and |
76 | | // other shenanigans to make it work. |
77 | | struct Shared { |
78 | | vec: Vec<u8>, |
79 | | original_capacity_repr: usize, |
80 | | ref_count: AtomicUsize, |
81 | | } |
82 | | |
83 | | // Assert that the alignment of `Shared` is divisible by 2. |
84 | | // This is a necessary invariant since we depend on allocating `Shared` a |
85 | | // shared object to implicitly carry the `KIND_ARC` flag in its pointer. |
86 | | // This flag is set when the LSB is 0. |
87 | | const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. |
88 | | |
89 | | // Buffer storage strategy flags. |
90 | | const KIND_ARC: usize = 0b0; |
91 | | const KIND_VEC: usize = 0b1; |
92 | | const KIND_MASK: usize = 0b1; |
93 | | |
94 | | // The max original capacity value. Any `Bytes` allocated with a greater initial |
95 | | // capacity will default to this. |
96 | | const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; |
97 | | // The original capacity algorithm will not take effect unless the originally |
98 | | // allocated capacity was at least 1kb in size. |
99 | | const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; |
100 | | // The original capacity is stored in powers of 2 starting at 1kb to a max of |
101 | | // 64kb. Representing it as such requires only 3 bits of storage. |
102 | | const ORIGINAL_CAPACITY_MASK: usize = 0b11100; |
103 | | const ORIGINAL_CAPACITY_OFFSET: usize = 2; |
104 | | |
105 | | const VEC_POS_OFFSET: usize = 5; |
106 | | // When the storage is in the `Vec` representation, the pointer can be advanced |
107 | | // at most this value. This is due to the amount of storage available to track |
108 | | // the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY |
109 | | // bits. |
110 | | const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; |
111 | | const NOT_VEC_POS_MASK: usize = 0b11111; |
112 | | |
113 | | #[cfg(target_pointer_width = "64")] |
114 | | const PTR_WIDTH: usize = 64; |
115 | | #[cfg(target_pointer_width = "32")] |
116 | | const PTR_WIDTH: usize = 32; |
117 | | |
118 | | /* |
119 | | * |
120 | | * ===== BytesMut ===== |
121 | | * |
122 | | */ |
123 | | |
124 | | impl BytesMut { |
125 | | /// Creates a new `BytesMut` with the specified capacity. |
126 | | /// |
127 | | /// The returned `BytesMut` will be able to hold at least `capacity` bytes |
128 | | /// without reallocating. |
129 | | /// |
130 | | /// It is important to note that this function does not specify the length |
131 | | /// of the returned `BytesMut`, but only the capacity. |
132 | | /// |
133 | | /// # Examples |
134 | | /// |
135 | | /// ``` |
136 | | /// use bytes::{BytesMut, BufMut}; |
137 | | /// |
138 | | /// let mut bytes = BytesMut::with_capacity(64); |
139 | | /// |
140 | | /// // `bytes` contains no data, even though there is capacity |
141 | | /// assert_eq!(bytes.len(), 0); |
142 | | /// |
143 | | /// bytes.put(&b"hello world"[..]); |
144 | | /// |
145 | | /// assert_eq!(&bytes[..], b"hello world"); |
146 | | /// ``` |
147 | | #[inline] |
148 | 0 | pub fn with_capacity(capacity: usize) -> BytesMut { |
149 | 0 | BytesMut::from_vec(Vec::with_capacity(capacity)) |
150 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::with_capacity Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::with_capacity |
151 | | |
152 | | /// Creates a new `BytesMut` with default capacity. |
153 | | /// |
154 | | /// Resulting object has length 0 and unspecified capacity. |
155 | | /// This function does not allocate. |
156 | | /// |
157 | | /// # Examples |
158 | | /// |
159 | | /// ``` |
160 | | /// use bytes::{BytesMut, BufMut}; |
161 | | /// |
162 | | /// let mut bytes = BytesMut::new(); |
163 | | /// |
164 | | /// assert_eq!(0, bytes.len()); |
165 | | /// |
166 | | /// bytes.reserve(2); |
167 | | /// bytes.put_slice(b"xy"); |
168 | | /// |
169 | | /// assert_eq!(&b"xy"[..], &bytes[..]); |
170 | | /// ``` |
171 | | #[inline] |
172 | 0 | pub fn new() -> BytesMut { |
173 | 0 | BytesMut::with_capacity(0) |
174 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::new Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::new |
175 | | |
176 | | /// Returns the number of bytes contained in this `BytesMut`. |
177 | | /// |
178 | | /// # Examples |
179 | | /// |
180 | | /// ``` |
181 | | /// use bytes::BytesMut; |
182 | | /// |
183 | | /// let b = BytesMut::from(&b"hello"[..]); |
184 | | /// assert_eq!(b.len(), 5); |
185 | | /// ``` |
186 | | #[inline] |
187 | 0 | pub fn len(&self) -> usize { |
188 | 0 | self.len |
189 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::len Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::len |
190 | | |
191 | | /// Returns true if the `BytesMut` has a length of 0. |
192 | | /// |
193 | | /// # Examples |
194 | | /// |
195 | | /// ``` |
196 | | /// use bytes::BytesMut; |
197 | | /// |
198 | | /// let b = BytesMut::with_capacity(64); |
199 | | /// assert!(b.is_empty()); |
200 | | /// ``` |
201 | | #[inline] |
202 | 0 | pub fn is_empty(&self) -> bool { |
203 | 0 | self.len == 0 |
204 | 0 | } |
205 | | |
206 | | /// Returns the number of bytes the `BytesMut` can hold without reallocating. |
207 | | /// |
208 | | /// # Examples |
209 | | /// |
210 | | /// ``` |
211 | | /// use bytes::BytesMut; |
212 | | /// |
213 | | /// let b = BytesMut::with_capacity(64); |
214 | | /// assert_eq!(b.capacity(), 64); |
215 | | /// ``` |
216 | | #[inline] |
217 | 0 | pub fn capacity(&self) -> usize { |
218 | 0 | self.cap |
219 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::capacity Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::capacity |
220 | | |
221 | | /// Converts `self` into an immutable `Bytes`. |
222 | | /// |
223 | | /// The conversion is zero cost and is used to indicate that the slice |
224 | | /// referenced by the handle will no longer be mutated. Once the conversion |
225 | | /// is done, the handle can be cloned and shared across threads. |
226 | | /// |
227 | | /// # Examples |
228 | | /// |
229 | | /// ``` |
230 | | /// use bytes::{BytesMut, BufMut}; |
231 | | /// use std::thread; |
232 | | /// |
233 | | /// let mut b = BytesMut::with_capacity(64); |
234 | | /// b.put(&b"hello world"[..]); |
235 | | /// let b1 = b.freeze(); |
236 | | /// let b2 = b1.clone(); |
237 | | /// |
238 | | /// let th = thread::spawn(move || { |
239 | | /// assert_eq!(&b1[..], b"hello world"); |
240 | | /// }); |
241 | | /// |
242 | | /// assert_eq!(&b2[..], b"hello world"); |
243 | | /// th.join().unwrap(); |
244 | | /// ``` |
245 | | #[inline] |
246 | 0 | pub fn freeze(self) -> Bytes { |
247 | 0 | let bytes = ManuallyDrop::new(self); |
248 | 0 | if bytes.kind() == KIND_VEC { |
249 | | // Just re-use `Bytes` internal Vec vtable |
250 | | unsafe { |
251 | 0 | let off = bytes.get_vec_pos(); |
252 | 0 | let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off); |
253 | 0 | let mut b: Bytes = vec.into(); |
254 | 0 | b.advance(off); |
255 | 0 | b |
256 | | } |
257 | | } else { |
258 | 0 | debug_assert_eq!(bytes.kind(), KIND_ARC); |
259 | | |
260 | 0 | let ptr = bytes.ptr.as_ptr(); |
261 | 0 | let len = bytes.len; |
262 | 0 | let data = AtomicPtr::new(bytes.data.cast()); |
263 | 0 | unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } |
264 | | } |
265 | 0 | } |
266 | | |
267 | | /// Creates a new `BytesMut` containing `len` zeros. |
268 | | /// |
269 | | /// The resulting object has a length of `len` and a capacity greater |
270 | | /// than or equal to `len`. The entire length of the object will be filled |
271 | | /// with zeros. |
272 | | /// |
273 | | /// On some platforms or allocators this function may be faster than |
274 | | /// a manual implementation. |
275 | | /// |
276 | | /// # Examples |
277 | | /// |
278 | | /// ``` |
279 | | /// use bytes::BytesMut; |
280 | | /// |
281 | | /// let zeros = BytesMut::zeroed(42); |
282 | | /// |
283 | | /// assert!(zeros.capacity() >= 42); |
284 | | /// assert_eq!(zeros.len(), 42); |
285 | | /// zeros.into_iter().for_each(|x| assert_eq!(x, 0)); |
286 | | /// ``` |
287 | 0 | pub fn zeroed(len: usize) -> BytesMut { |
288 | 0 | BytesMut::from_vec(vec![0; len]) |
289 | 0 | } |
290 | | |
291 | | /// Splits the bytes into two at the given index. |
292 | | /// |
293 | | /// Afterwards `self` contains elements `[0, at)`, and the returned |
294 | | /// `BytesMut` contains elements `[at, capacity)`. It's guaranteed that the |
295 | | /// memory does not move, that is, the address of `self` does not change, |
296 | | /// and the address of the returned slice is `at` bytes after that. |
297 | | /// |
298 | | /// This is an `O(1)` operation that just increases the reference count |
299 | | /// and sets a few indices. |
300 | | /// |
301 | | /// # Examples |
302 | | /// |
303 | | /// ``` |
304 | | /// use bytes::BytesMut; |
305 | | /// |
306 | | /// let mut a = BytesMut::from(&b"hello world"[..]); |
307 | | /// let mut b = a.split_off(5); |
308 | | /// |
309 | | /// a[0] = b'j'; |
310 | | /// b[0] = b'!'; |
311 | | /// |
312 | | /// assert_eq!(&a[..], b"jello"); |
313 | | /// assert_eq!(&b[..], b"!world"); |
314 | | /// ``` |
315 | | /// |
316 | | /// # Panics |
317 | | /// |
318 | | /// Panics if `at > capacity`. |
319 | | #[must_use = "consider BytesMut::truncate if you don't need the other half"] |
320 | 0 | pub fn split_off(&mut self, at: usize) -> BytesMut { |
321 | 0 | assert!( |
322 | 0 | at <= self.capacity(), |
323 | 0 | "split_off out of bounds: {:?} <= {:?}", |
324 | 0 | at, |
325 | 0 | self.capacity(), |
326 | | ); |
327 | | unsafe { |
328 | 0 | let mut other = self.shallow_clone(); |
329 | 0 | // SAFETY: We've checked that `at` <= `self.capacity()` above. |
330 | 0 | other.advance_unchecked(at); |
331 | 0 | self.cap = at; |
332 | 0 | self.len = cmp::min(self.len, at); |
333 | 0 | other |
334 | 0 | } |
335 | 0 | } |
336 | | |
337 | | /// Removes the bytes from the current view, returning them in a new |
338 | | /// `BytesMut` handle. |
339 | | /// |
340 | | /// Afterwards, `self` will be empty, but will retain any additional |
341 | | /// capacity that it had before the operation. This is identical to |
342 | | /// `self.split_to(self.len())`. |
343 | | /// |
344 | | /// This is an `O(1)` operation that just increases the reference count and |
345 | | /// sets a few indices. |
346 | | /// |
347 | | /// # Examples |
348 | | /// |
349 | | /// ``` |
350 | | /// use bytes::{BytesMut, BufMut}; |
351 | | /// |
352 | | /// let mut buf = BytesMut::with_capacity(1024); |
353 | | /// buf.put(&b"hello world"[..]); |
354 | | /// |
355 | | /// let other = buf.split(); |
356 | | /// |
357 | | /// assert!(buf.is_empty()); |
358 | | /// assert_eq!(1013, buf.capacity()); |
359 | | /// |
360 | | /// assert_eq!(other, b"hello world"[..]); |
361 | | /// ``` |
362 | | #[must_use = "consider BytesMut::clear if you don't need the other half"] |
363 | 0 | pub fn split(&mut self) -> BytesMut { |
364 | 0 | let len = self.len(); |
365 | 0 | self.split_to(len) |
366 | 0 | } |
367 | | |
368 | | /// Splits the buffer into two at the given index. |
369 | | /// |
370 | | /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` |
371 | | /// contains elements `[0, at)`. |
372 | | /// |
373 | | /// This is an `O(1)` operation that just increases the reference count and |
374 | | /// sets a few indices. |
375 | | /// |
376 | | /// # Examples |
377 | | /// |
378 | | /// ``` |
379 | | /// use bytes::BytesMut; |
380 | | /// |
381 | | /// let mut a = BytesMut::from(&b"hello world"[..]); |
382 | | /// let mut b = a.split_to(5); |
383 | | /// |
384 | | /// a[0] = b'!'; |
385 | | /// b[0] = b'j'; |
386 | | /// |
387 | | /// assert_eq!(&a[..], b"!world"); |
388 | | /// assert_eq!(&b[..], b"jello"); |
389 | | /// ``` |
390 | | /// |
391 | | /// # Panics |
392 | | /// |
393 | | /// Panics if `at > len`. |
394 | | #[must_use = "consider BytesMut::advance if you don't need the other half"] |
395 | 0 | pub fn split_to(&mut self, at: usize) -> BytesMut { |
396 | 0 | assert!( |
397 | 0 | at <= self.len(), |
398 | 0 | "split_to out of bounds: {:?} <= {:?}", |
399 | 0 | at, |
400 | 0 | self.len(), |
401 | | ); |
402 | | |
403 | | unsafe { |
404 | 0 | let mut other = self.shallow_clone(); |
405 | 0 | // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <= |
406 | 0 | // `self.capacity()`. |
407 | 0 | self.advance_unchecked(at); |
408 | 0 | other.cap = at; |
409 | 0 | other.len = at; |
410 | 0 | other |
411 | 0 | } |
412 | 0 | } |
413 | | |
414 | | /// Shortens the buffer, keeping the first `len` bytes and dropping the |
415 | | /// rest. |
416 | | /// |
417 | | /// If `len` is greater than the buffer's current length, this has no |
418 | | /// effect. |
419 | | /// |
420 | | /// Existing underlying capacity is preserved. |
421 | | /// |
422 | | /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the |
423 | | /// excess bytes to be returned instead of dropped. |
424 | | /// |
425 | | /// # Examples |
426 | | /// |
427 | | /// ``` |
428 | | /// use bytes::BytesMut; |
429 | | /// |
430 | | /// let mut buf = BytesMut::from(&b"hello world"[..]); |
431 | | /// buf.truncate(5); |
432 | | /// assert_eq!(buf, b"hello"[..]); |
433 | | /// ``` |
434 | 0 | pub fn truncate(&mut self, len: usize) { |
435 | 0 | if len <= self.len() { |
436 | 0 | // SAFETY: Shrinking the buffer cannot expose uninitialized bytes. |
437 | 0 | unsafe { self.set_len(len) }; |
438 | 0 | } |
439 | 0 | } |
440 | | |
441 | | /// Clears the buffer, removing all data. Existing capacity is preserved. |
442 | | /// |
443 | | /// # Examples |
444 | | /// |
445 | | /// ``` |
446 | | /// use bytes::BytesMut; |
447 | | /// |
448 | | /// let mut buf = BytesMut::from(&b"hello world"[..]); |
449 | | /// buf.clear(); |
450 | | /// assert!(buf.is_empty()); |
451 | | /// ``` |
452 | 0 | pub fn clear(&mut self) { |
453 | 0 | // SAFETY: Setting the length to zero cannot expose uninitialized bytes. |
454 | 0 | unsafe { self.set_len(0) }; |
455 | 0 | } |
456 | | |
457 | | /// Resizes the buffer so that `len` is equal to `new_len`. |
458 | | /// |
459 | | /// If `new_len` is greater than `len`, the buffer is extended by the |
460 | | /// difference with each additional byte set to `value`. If `new_len` is |
461 | | /// less than `len`, the buffer is simply truncated. |
462 | | /// |
463 | | /// # Examples |
464 | | /// |
465 | | /// ``` |
466 | | /// use bytes::BytesMut; |
467 | | /// |
468 | | /// let mut buf = BytesMut::new(); |
469 | | /// |
470 | | /// buf.resize(3, 0x1); |
471 | | /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); |
472 | | /// |
473 | | /// buf.resize(2, 0x2); |
474 | | /// assert_eq!(&buf[..], &[0x1, 0x1]); |
475 | | /// |
476 | | /// buf.resize(4, 0x3); |
477 | | /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); |
478 | | /// ``` |
479 | 0 | pub fn resize(&mut self, new_len: usize, value: u8) { |
480 | 0 | let additional = if let Some(additional) = new_len.checked_sub(self.len()) { |
481 | 0 | additional |
482 | | } else { |
483 | 0 | self.truncate(new_len); |
484 | 0 | return; |
485 | | }; |
486 | | |
487 | 0 | if additional == 0 { |
488 | 0 | return; |
489 | 0 | } |
490 | 0 |
|
491 | 0 | self.reserve(additional); |
492 | 0 | let dst = self.spare_capacity_mut().as_mut_ptr(); |
493 | 0 | // SAFETY: `spare_capacity_mut` returns a valid, properly aligned pointer and we've |
494 | 0 | // reserved enough space to write `additional` bytes. |
495 | 0 | unsafe { ptr::write_bytes(dst, value, additional) }; |
496 | 0 |
|
497 | 0 | // SAFETY: There are at least `new_len` initialized bytes in the buffer so no |
498 | 0 | // uninitialized bytes are being exposed. |
499 | 0 | unsafe { self.set_len(new_len) }; |
500 | 0 | } |
501 | | |
502 | | /// Sets the length of the buffer. |
503 | | /// |
504 | | /// This will explicitly set the size of the buffer without actually |
505 | | /// modifying the data, so it is up to the caller to ensure that the data |
506 | | /// has been initialized. |
507 | | /// |
508 | | /// # Examples |
509 | | /// |
510 | | /// ``` |
511 | | /// use bytes::BytesMut; |
512 | | /// |
513 | | /// let mut b = BytesMut::from(&b"hello world"[..]); |
514 | | /// |
515 | | /// unsafe { |
516 | | /// b.set_len(5); |
517 | | /// } |
518 | | /// |
519 | | /// assert_eq!(&b[..], b"hello"); |
520 | | /// |
521 | | /// unsafe { |
522 | | /// b.set_len(11); |
523 | | /// } |
524 | | /// |
525 | | /// assert_eq!(&b[..], b"hello world"); |
526 | | /// ``` |
527 | | #[inline] |
528 | 0 | pub unsafe fn set_len(&mut self, len: usize) { |
529 | 0 | debug_assert!(len <= self.cap, "set_len out of bounds"); |
530 | 0 | self.len = len; |
531 | 0 | } |
532 | | |
533 | | /// Reserves capacity for at least `additional` more bytes to be inserted |
534 | | /// into the given `BytesMut`. |
535 | | /// |
536 | | /// More than `additional` bytes may be reserved in order to avoid frequent |
537 | | /// reallocations. A call to `reserve` may result in an allocation. |
538 | | /// |
539 | | /// Before allocating new buffer space, the function will attempt to reclaim |
540 | | /// space in the existing buffer. If the current handle references a view |
541 | | /// into a larger original buffer, and all other handles referencing part |
542 | | /// of the same original buffer have been dropped, then the current view |
543 | | /// can be copied/shifted to the front of the buffer and the handle can take |
544 | | /// ownership of the full buffer, provided that the full buffer is large |
545 | | /// enough to fit the requested additional capacity. |
546 | | /// |
547 | | /// This optimization will only happen if shifting the data from the current |
548 | | /// view to the front of the buffer is not too expensive in terms of the |
549 | | /// (amortized) time required. The precise condition is subject to change; |
550 | | /// as of now, the length of the data being shifted needs to be at least as |
551 | | /// large as the distance that it's shifted by. If the current view is empty |
552 | | /// and the original buffer is large enough to fit the requested additional |
553 | | /// capacity, then reallocations will never happen. |
554 | | /// |
555 | | /// # Examples |
556 | | /// |
557 | | /// In the following example, a new buffer is allocated. |
558 | | /// |
559 | | /// ``` |
560 | | /// use bytes::BytesMut; |
561 | | /// |
562 | | /// let mut buf = BytesMut::from(&b"hello"[..]); |
563 | | /// buf.reserve(64); |
564 | | /// assert!(buf.capacity() >= 69); |
565 | | /// ``` |
566 | | /// |
567 | | /// In the following example, the existing buffer is reclaimed. |
568 | | /// |
569 | | /// ``` |
570 | | /// use bytes::{BytesMut, BufMut}; |
571 | | /// |
572 | | /// let mut buf = BytesMut::with_capacity(128); |
573 | | /// buf.put(&[0; 64][..]); |
574 | | /// |
575 | | /// let ptr = buf.as_ptr(); |
576 | | /// let other = buf.split(); |
577 | | /// |
578 | | /// assert!(buf.is_empty()); |
579 | | /// assert_eq!(buf.capacity(), 64); |
580 | | /// |
581 | | /// drop(other); |
582 | | /// buf.reserve(128); |
583 | | /// |
584 | | /// assert_eq!(buf.capacity(), 128); |
585 | | /// assert_eq!(buf.as_ptr(), ptr); |
586 | | /// ``` |
587 | | /// |
588 | | /// # Panics |
589 | | /// |
590 | | /// Panics if the new capacity overflows `usize`. |
591 | | #[inline] |
592 | 0 | pub fn reserve(&mut self, additional: usize) { |
593 | 0 | let len = self.len(); |
594 | 0 | let rem = self.capacity() - len; |
595 | 0 |
|
596 | 0 | if additional <= rem { |
597 | | // The handle can already store at least `additional` more bytes, so |
598 | | // there is no further work needed to be done. |
599 | 0 | return; |
600 | 0 | } |
601 | 0 |
|
602 | 0 | // will always succeed |
603 | 0 | let _ = self.reserve_inner(additional, true); |
604 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::reserve Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::reserve |
605 | | |
606 | | // In separate function to allow the short-circuits in `reserve` and `try_reclaim` to |
607 | | // be inline-able. Significantly helps performance. Returns false if it did not succeed. |
608 | 0 | fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool { |
609 | 0 | let len = self.len(); |
610 | 0 | let kind = self.kind(); |
611 | 0 |
|
612 | 0 | if kind == KIND_VEC { |
613 | | // If there's enough free space before the start of the buffer, then |
614 | | // just copy the data backwards and reuse the already-allocated |
615 | | // space. |
616 | | // |
617 | | // Otherwise, since backed by a vector, use `Vec::reserve` |
618 | | // |
619 | | // We need to make sure that this optimization does not kill the |
620 | | // amortized runtimes of BytesMut's operations. |
621 | | unsafe { |
622 | 0 | let off = self.get_vec_pos(); |
623 | 0 |
|
624 | 0 | // Only reuse space if we can satisfy the requested additional space. |
625 | 0 | // |
626 | 0 | // Also check if the value of `off` suggests that enough bytes |
627 | 0 | // have been read to account for the overhead of shifting all |
628 | 0 | // the data (in an amortized analysis). |
629 | 0 | // Hence the condition `off >= self.len()`. |
630 | 0 | // |
631 | 0 | // This condition also already implies that the buffer is going |
632 | 0 | // to be (at least) half-empty in the end; so we do not break |
633 | 0 | // the (amortized) runtime with future resizes of the underlying |
634 | 0 | // `Vec`. |
635 | 0 | // |
636 | 0 | // [For more details check issue #524, and PR #525.] |
637 | 0 | if self.capacity() - self.len() + off >= additional && off >= self.len() { |
638 | 0 | // There's enough space, and it's not too much overhead: |
639 | 0 | // reuse the space! |
640 | 0 | // |
641 | 0 | // Just move the pointer back to the start after copying |
642 | 0 | // data back. |
643 | 0 | let base_ptr = self.ptr.as_ptr().sub(off); |
644 | 0 | // Since `off >= self.len()`, the two regions don't overlap. |
645 | 0 | ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); |
646 | 0 | self.ptr = vptr(base_ptr); |
647 | 0 | self.set_vec_pos(0); |
648 | 0 |
|
649 | 0 | // Length stays constant, but since we moved backwards we |
650 | 0 | // can gain capacity back. |
651 | 0 | self.cap += off; |
652 | 0 | } else { |
653 | 0 | if !allocate { |
654 | 0 | return false; |
655 | 0 | } |
656 | 0 | // Not enough space, or reusing might be too much overhead: |
657 | 0 | // allocate more space! |
658 | 0 | let mut v = |
659 | 0 | ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); |
660 | 0 | v.reserve(additional); |
661 | 0 |
|
662 | 0 | // Update the info |
663 | 0 | self.ptr = vptr(v.as_mut_ptr().add(off)); |
664 | 0 | self.cap = v.capacity() - off; |
665 | 0 | debug_assert_eq!(self.len, v.len() - off); |
666 | | } |
667 | | |
668 | 0 | return true; |
669 | | } |
670 | 0 | } |
671 | 0 |
|
672 | 0 | debug_assert_eq!(kind, KIND_ARC); |
673 | 0 | let shared: *mut Shared = self.data; |
674 | | |
675 | | // Reserving involves abandoning the currently shared buffer and |
676 | | // allocating a new vector with the requested capacity. |
677 | | // |
678 | | // Compute the new capacity |
679 | 0 | let mut new_cap = match len.checked_add(additional) { |
680 | 0 | Some(new_cap) => new_cap, |
681 | 0 | None if !allocate => return false, |
682 | 0 | None => panic!("overflow"), |
683 | | }; |
684 | | |
685 | | unsafe { |
686 | | // First, try to reclaim the buffer. This is possible if the current |
687 | | // handle is the only outstanding handle pointing to the buffer. |
688 | 0 | if (*shared).is_unique() { |
689 | | // This is the only handle to the buffer. It can be reclaimed. |
690 | | // However, before doing the work of copying data, check to make |
691 | | // sure that the vector has enough capacity. |
692 | 0 | let v = &mut (*shared).vec; |
693 | 0 |
|
694 | 0 | let v_capacity = v.capacity(); |
695 | 0 | let ptr = v.as_mut_ptr(); |
696 | 0 |
|
697 | 0 | let offset = offset_from(self.ptr.as_ptr(), ptr); |
698 | 0 |
|
699 | 0 | // Compare the condition in the `kind == KIND_VEC` case above |
700 | 0 | // for more details. |
701 | 0 | if v_capacity >= new_cap + offset { |
702 | 0 | self.cap = new_cap; |
703 | 0 | // no copy is necessary |
704 | 0 | } else if v_capacity >= new_cap && offset >= len { |
705 | 0 | // The capacity is sufficient, and copying is not too much |
706 | 0 | // overhead: reclaim the buffer! |
707 | 0 |
|
708 | 0 | // `offset >= len` means: no overlap |
709 | 0 | ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len); |
710 | 0 |
|
711 | 0 | self.ptr = vptr(ptr); |
712 | 0 | self.cap = v.capacity(); |
713 | 0 | } else { |
714 | 0 | if !allocate { |
715 | 0 | return false; |
716 | 0 | } |
717 | 0 | // calculate offset |
718 | 0 | let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize); |
719 | 0 |
|
720 | 0 | // new_cap is calculated in terms of `BytesMut`, not the underlying |
721 | 0 | // `Vec`, so it does not take the offset into account. |
722 | 0 | // |
723 | 0 | // Thus we have to manually add it here. |
724 | 0 | new_cap = new_cap.checked_add(off).expect("overflow"); |
725 | 0 |
|
726 | 0 | // The vector capacity is not sufficient. The reserve request is |
727 | 0 | // asking for more than the initial buffer capacity. Allocate more |
728 | 0 | // than requested if `new_cap` is not much bigger than the current |
729 | 0 | // capacity. |
730 | 0 | // |
731 | 0 | // There are some situations, using `reserve_exact` that the |
732 | 0 | // buffer capacity could be below `original_capacity`, so do a |
733 | 0 | // check. |
734 | 0 | let double = v.capacity().checked_shl(1).unwrap_or(new_cap); |
735 | 0 |
|
736 | 0 | new_cap = cmp::max(double, new_cap); |
737 | 0 |
|
738 | 0 | // No space - allocate more |
739 | 0 | // |
740 | 0 | // The length field of `Shared::vec` is not used by the `BytesMut`; |
741 | 0 | // instead we use the `len` field in the `BytesMut` itself. However, |
742 | 0 | // when calling `reserve`, it doesn't guarantee that data stored in |
743 | 0 | // the unused capacity of the vector is copied over to the new |
744 | 0 | // allocation, so we need to ensure that we don't have any data we |
745 | 0 | // care about in the unused capacity before calling `reserve`. |
746 | 0 | debug_assert!(off + len <= v.capacity()); |
747 | 0 | v.set_len(off + len); |
748 | 0 | v.reserve(new_cap - v.len()); |
749 | 0 |
|
750 | 0 | // Update the info |
751 | 0 | self.ptr = vptr(v.as_mut_ptr().add(off)); |
752 | 0 | self.cap = v.capacity() - off; |
753 | | } |
754 | | |
755 | 0 | return true; |
756 | 0 | } |
757 | 0 | } |
758 | 0 | if !allocate { |
759 | 0 | return false; |
760 | 0 | } |
761 | 0 |
|
762 | 0 | let original_capacity_repr = unsafe { (*shared).original_capacity_repr }; |
763 | 0 | let original_capacity = original_capacity_from_repr(original_capacity_repr); |
764 | 0 |
|
765 | 0 | new_cap = cmp::max(new_cap, original_capacity); |
766 | 0 |
|
767 | 0 | // Create a new vector to store the data |
768 | 0 | let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap)); |
769 | 0 |
|
770 | 0 | // Copy the bytes |
771 | 0 | v.extend_from_slice(self.as_ref()); |
772 | 0 |
|
773 | 0 | // Release the shared handle. This must be done *after* the bytes are |
774 | 0 | // copied. |
775 | 0 | unsafe { release_shared(shared) }; |
776 | 0 |
|
777 | 0 | // Update self |
778 | 0 | let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; |
779 | 0 | self.data = invalid_ptr(data); |
780 | 0 | self.ptr = vptr(v.as_mut_ptr()); |
781 | 0 | self.cap = v.capacity(); |
782 | 0 | debug_assert_eq!(self.len, v.len()); |
783 | 0 | return true; |
784 | 0 | } |
785 | | |
786 | | /// Attempts to cheaply reclaim already allocated capacity for at least `additional` more |
787 | | /// bytes to be inserted into the given `BytesMut` and returns `true` if it succeeded. |
788 | | /// |
789 | | /// `try_reclaim` behaves exactly like `reserve`, except that it never allocates new storage |
790 | | /// and returns a `bool` indicating whether it was successful in doing so: |
791 | | /// |
792 | | /// `try_reclaim` returns false under these conditions: |
793 | | /// - The spare capacity left is less than `additional` bytes AND |
794 | | /// - The existing allocation cannot be reclaimed cheaply or it was less than |
795 | | /// `additional` bytes in size |
796 | | /// |
797 | | /// Reclaiming the allocation cheaply is possible if the `BytesMut` has no outstanding |
798 | | /// references through other `BytesMut`s or `Bytes` which point to the same underlying |
799 | | /// storage. |
800 | | /// |
801 | | /// # Examples |
802 | | /// |
803 | | /// ``` |
804 | | /// use bytes::BytesMut; |
805 | | /// |
806 | | /// let mut buf = BytesMut::with_capacity(64); |
807 | | /// assert_eq!(true, buf.try_reclaim(64)); |
808 | | /// assert_eq!(64, buf.capacity()); |
809 | | /// |
810 | | /// buf.extend_from_slice(b"abcd"); |
811 | | /// let mut split = buf.split(); |
812 | | /// assert_eq!(60, buf.capacity()); |
813 | | /// assert_eq!(4, split.capacity()); |
814 | | /// assert_eq!(false, split.try_reclaim(64)); |
815 | | /// assert_eq!(false, buf.try_reclaim(64)); |
816 | | /// // The split buffer is filled with "abcd" |
817 | | /// assert_eq!(false, split.try_reclaim(4)); |
818 | | /// // buf is empty and has capacity for 60 bytes |
819 | | /// assert_eq!(true, buf.try_reclaim(60)); |
820 | | /// |
821 | | /// drop(buf); |
822 | | /// assert_eq!(false, split.try_reclaim(64)); |
823 | | /// |
824 | | /// split.clear(); |
825 | | /// assert_eq!(4, split.capacity()); |
826 | | /// assert_eq!(true, split.try_reclaim(64)); |
827 | | /// assert_eq!(64, split.capacity()); |
828 | | /// ``` |
829 | | // I tried splitting out try_reclaim_inner after the short circuits, but it was inlined |
830 | | // regardless with Rust 1.78.0 so probably not worth it |
831 | | #[inline] |
832 | | #[must_use = "consider BytesMut::reserve if you need an infallible reservation"] |
833 | 0 | pub fn try_reclaim(&mut self, additional: usize) -> bool { |
834 | 0 | let len = self.len(); |
835 | 0 | let rem = self.capacity() - len; |
836 | 0 |
|
837 | 0 | if additional <= rem { |
838 | | // The handle can already store at least `additional` more bytes, so |
839 | | // there is no further work needed to be done. |
840 | 0 | return true; |
841 | 0 | } |
842 | 0 |
|
843 | 0 | self.reserve_inner(additional, false) |
844 | 0 | } |
845 | | |
846 | | /// Appends given bytes to this `BytesMut`. |
847 | | /// |
848 | | /// If this `BytesMut` object does not have enough capacity, it is resized |
849 | | /// first. |
850 | | /// |
851 | | /// # Examples |
852 | | /// |
853 | | /// ``` |
854 | | /// use bytes::BytesMut; |
855 | | /// |
856 | | /// let mut buf = BytesMut::with_capacity(0); |
857 | | /// buf.extend_from_slice(b"aaabbb"); |
858 | | /// buf.extend_from_slice(b"cccddd"); |
859 | | /// |
860 | | /// assert_eq!(b"aaabbbcccddd", &buf[..]); |
861 | | /// ``` |
862 | | #[inline] |
863 | 0 | pub fn extend_from_slice(&mut self, extend: &[u8]) { |
864 | 0 | let cnt = extend.len(); |
865 | 0 | self.reserve(cnt); |
866 | 0 |
|
867 | 0 | unsafe { |
868 | 0 | let dst = self.spare_capacity_mut(); |
869 | 0 | // Reserved above |
870 | 0 | debug_assert!(dst.len() >= cnt); |
871 | | |
872 | 0 | ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt); |
873 | 0 | } |
874 | 0 |
|
875 | 0 | unsafe { |
876 | 0 | self.advance_mut(cnt); |
877 | 0 | } |
878 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::extend_from_slice Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::extend_from_slice |
879 | | |
880 | | /// Absorbs a `BytesMut` that was previously split off. |
881 | | /// |
882 | | /// If the two `BytesMut` objects were previously contiguous and not mutated |
883 | | /// in a way that causes re-allocation i.e., if `other` was created by |
884 | | /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation |
885 | | /// that just decreases a reference count and sets a few indices. |
886 | | /// Otherwise this method degenerates to |
887 | | /// `self.extend_from_slice(other.as_ref())`. |
888 | | /// |
889 | | /// # Examples |
890 | | /// |
891 | | /// ``` |
892 | | /// use bytes::BytesMut; |
893 | | /// |
894 | | /// let mut buf = BytesMut::with_capacity(64); |
895 | | /// buf.extend_from_slice(b"aaabbbcccddd"); |
896 | | /// |
897 | | /// let split = buf.split_off(6); |
898 | | /// assert_eq!(b"aaabbb", &buf[..]); |
899 | | /// assert_eq!(b"cccddd", &split[..]); |
900 | | /// |
901 | | /// buf.unsplit(split); |
902 | | /// assert_eq!(b"aaabbbcccddd", &buf[..]); |
903 | | /// ``` |
904 | 0 | pub fn unsplit(&mut self, other: BytesMut) { |
905 | 0 | if self.is_empty() { |
906 | 0 | *self = other; |
907 | 0 | return; |
908 | 0 | } |
909 | | |
910 | 0 | if let Err(other) = self.try_unsplit(other) { |
911 | 0 | self.extend_from_slice(other.as_ref()); |
912 | 0 | } |
913 | 0 | } |
914 | | |
915 | | // private |
916 | | |
917 | | // For now, use a `Vec` to manage the memory for us, but we may want to |
918 | | // change that in the future to some alternate allocator strategy. |
919 | | // |
920 | | // Thus, we don't expose an easy way to construct from a `Vec` since an |
921 | | // internal change could make a simple pattern (`BytesMut::from(vec)`) |
922 | | // suddenly a lot more expensive. |
923 | | #[inline] |
924 | 0 | pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut { |
925 | 0 | let mut vec = ManuallyDrop::new(vec); |
926 | 0 | let ptr = vptr(vec.as_mut_ptr()); |
927 | 0 | let len = vec.len(); |
928 | 0 | let cap = vec.capacity(); |
929 | 0 |
|
930 | 0 | let original_capacity_repr = original_capacity_to_repr(cap); |
931 | 0 | let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; |
932 | 0 |
|
933 | 0 | BytesMut { |
934 | 0 | ptr, |
935 | 0 | len, |
936 | 0 | cap, |
937 | 0 | data: invalid_ptr(data), |
938 | 0 | } |
939 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::from_vec Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::from_vec |
940 | | |
941 | | #[inline] |
942 | 0 | fn as_slice(&self) -> &[u8] { |
943 | 0 | unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } |
944 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::as_slice Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::as_slice |
945 | | |
946 | | #[inline] |
947 | 0 | fn as_slice_mut(&mut self) -> &mut [u8] { |
948 | 0 | unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } |
949 | 0 | } |
950 | | |
951 | | /// Advance the buffer without bounds checking. |
952 | | /// |
953 | | /// # SAFETY |
954 | | /// |
955 | | /// The caller must ensure that `count` <= `self.cap`. |
956 | 0 | pub(crate) unsafe fn advance_unchecked(&mut self, count: usize) { |
957 | 0 | // Setting the start to 0 is a no-op, so return early if this is the |
958 | 0 | // case. |
959 | 0 | if count == 0 { |
960 | 0 | return; |
961 | 0 | } |
962 | 0 |
|
963 | 0 | debug_assert!(count <= self.cap, "internal: set_start out of bounds"); |
964 | | |
965 | 0 | let kind = self.kind(); |
966 | 0 |
|
967 | 0 | if kind == KIND_VEC { |
968 | | // Setting the start when in vec representation is a little more |
969 | | // complicated. First, we have to track how far ahead the |
970 | | // "start" of the byte buffer from the beginning of the vec. We |
971 | | // also have to ensure that we don't exceed the maximum shift. |
972 | 0 | let pos = self.get_vec_pos() + count; |
973 | 0 |
|
974 | 0 | if pos <= MAX_VEC_POS { |
975 | 0 | self.set_vec_pos(pos); |
976 | 0 | } else { |
977 | 0 | // The repr must be upgraded to ARC. This will never happen |
978 | 0 | // on 64 bit systems and will only happen on 32 bit systems |
979 | 0 | // when shifting past 134,217,727 bytes. As such, we don't |
980 | 0 | // worry too much about performance here. |
981 | 0 | self.promote_to_shared(/*ref_count = */ 1); |
982 | 0 | } |
983 | 0 | } |
984 | | |
985 | | // Updating the start of the view is setting `ptr` to point to the |
986 | | // new start and updating the `len` field to reflect the new length |
987 | | // of the view. |
988 | 0 | self.ptr = vptr(self.ptr.as_ptr().add(count)); |
989 | 0 | self.len = self.len.checked_sub(count).unwrap_or(0); |
990 | 0 | self.cap -= count; |
991 | 0 | } |
992 | | |
993 | 0 | fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> { |
994 | 0 | if other.capacity() == 0 { |
995 | 0 | return Ok(()); |
996 | 0 | } |
997 | 0 |
|
998 | 0 | let ptr = unsafe { self.ptr.as_ptr().add(self.len) }; |
999 | 0 | if ptr == other.ptr.as_ptr() |
1000 | 0 | && self.kind() == KIND_ARC |
1001 | 0 | && other.kind() == KIND_ARC |
1002 | 0 | && self.data == other.data |
1003 | | { |
1004 | | // Contiguous blocks, just combine directly |
1005 | 0 | self.len += other.len; |
1006 | 0 | self.cap += other.cap; |
1007 | 0 | Ok(()) |
1008 | | } else { |
1009 | 0 | Err(other) |
1010 | | } |
1011 | 0 | } |
1012 | | |
1013 | | #[inline] |
1014 | 0 | fn kind(&self) -> usize { |
1015 | 0 | self.data as usize & KIND_MASK |
1016 | 0 | } |
1017 | | |
1018 | 0 | unsafe fn promote_to_shared(&mut self, ref_cnt: usize) { |
1019 | 0 | debug_assert_eq!(self.kind(), KIND_VEC); |
1020 | 0 | debug_assert!(ref_cnt == 1 || ref_cnt == 2); |
1021 | | |
1022 | 0 | let original_capacity_repr = |
1023 | 0 | (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET; |
1024 | 0 |
|
1025 | 0 | // The vec offset cannot be concurrently mutated, so there |
1026 | 0 | // should be no danger reading it. |
1027 | 0 | let off = (self.data as usize) >> VEC_POS_OFFSET; |
1028 | 0 |
|
1029 | 0 | // First, allocate a new `Shared` instance containing the |
1030 | 0 | // `Vec` fields. It's important to note that `ptr`, `len`, |
1031 | 0 | // and `cap` cannot be mutated without having `&mut self`. |
1032 | 0 | // This means that these fields will not be concurrently |
1033 | 0 | // updated and since the buffer hasn't been promoted to an |
1034 | 0 | // `Arc`, those three fields still are the components of the |
1035 | 0 | // vector. |
1036 | 0 | let shared = Box::new(Shared { |
1037 | 0 | vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off), |
1038 | 0 | original_capacity_repr, |
1039 | 0 | ref_count: AtomicUsize::new(ref_cnt), |
1040 | 0 | }); |
1041 | 0 |
|
1042 | 0 | let shared = Box::into_raw(shared); |
1043 | 0 |
|
1044 | 0 | // The pointer should be aligned, so this assert should |
1045 | 0 | // always succeed. |
1046 | 0 | debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC); |
1047 | | |
1048 | 0 | self.data = shared; |
1049 | 0 | } |
1050 | | |
1051 | | /// Makes an exact shallow clone of `self`. |
1052 | | /// |
1053 | | /// The kind of `self` doesn't matter, but this is unsafe |
1054 | | /// because the clone will have the same offsets. You must |
1055 | | /// be sure the returned value to the user doesn't allow |
1056 | | /// two views into the same range. |
1057 | | #[inline] |
1058 | 0 | unsafe fn shallow_clone(&mut self) -> BytesMut { |
1059 | 0 | if self.kind() == KIND_ARC { |
1060 | 0 | increment_shared(self.data); |
1061 | 0 | ptr::read(self) |
1062 | | } else { |
1063 | 0 | self.promote_to_shared(/*ref_count = */ 2); |
1064 | 0 | ptr::read(self) |
1065 | | } |
1066 | 0 | } |
1067 | | |
1068 | | #[inline] |
1069 | 0 | unsafe fn get_vec_pos(&self) -> usize { |
1070 | 0 | debug_assert_eq!(self.kind(), KIND_VEC); |
1071 | | |
1072 | 0 | self.data as usize >> VEC_POS_OFFSET |
1073 | 0 | } |
1074 | | |
1075 | | #[inline] |
1076 | 0 | unsafe fn set_vec_pos(&mut self, pos: usize) { |
1077 | 0 | debug_assert_eq!(self.kind(), KIND_VEC); |
1078 | 0 | debug_assert!(pos <= MAX_VEC_POS); |
1079 | | |
1080 | 0 | self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK)); |
1081 | 0 | } |
1082 | | |
1083 | | /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`. |
1084 | | /// |
1085 | | /// The returned slice can be used to fill the buffer with data (e.g. by |
1086 | | /// reading from a file) before marking the data as initialized using the |
1087 | | /// [`set_len`] method. |
1088 | | /// |
1089 | | /// [`set_len`]: BytesMut::set_len |
1090 | | /// |
1091 | | /// # Examples |
1092 | | /// |
1093 | | /// ``` |
1094 | | /// use bytes::BytesMut; |
1095 | | /// |
1096 | | /// // Allocate buffer big enough for 10 bytes. |
1097 | | /// let mut buf = BytesMut::with_capacity(10); |
1098 | | /// |
1099 | | /// // Fill in the first 3 elements. |
1100 | | /// let uninit = buf.spare_capacity_mut(); |
1101 | | /// uninit[0].write(0); |
1102 | | /// uninit[1].write(1); |
1103 | | /// uninit[2].write(2); |
1104 | | /// |
1105 | | /// // Mark the first 3 bytes of the buffer as being initialized. |
1106 | | /// unsafe { |
1107 | | /// buf.set_len(3); |
1108 | | /// } |
1109 | | /// |
1110 | | /// assert_eq!(&buf[..], &[0, 1, 2]); |
1111 | | /// ``` |
1112 | | #[inline] |
1113 | 0 | pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] { |
1114 | 0 | unsafe { |
1115 | 0 | let ptr = self.ptr.as_ptr().add(self.len); |
1116 | 0 | let len = self.cap - self.len; |
1117 | 0 |
|
1118 | 0 | slice::from_raw_parts_mut(ptr.cast(), len) |
1119 | 0 | } |
1120 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::spare_capacity_mut Unexecuted instantiation: <bytes::bytes_mut::BytesMut>::spare_capacity_mut |
1121 | | } |
1122 | | |
1123 | | impl Drop for BytesMut { |
1124 | 0 | fn drop(&mut self) { |
1125 | 0 | let kind = self.kind(); |
1126 | 0 |
|
1127 | 0 | if kind == KIND_VEC { |
1128 | 0 | unsafe { |
1129 | 0 | let off = self.get_vec_pos(); |
1130 | 0 |
|
1131 | 0 | // Vector storage, free the vector |
1132 | 0 | let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); |
1133 | 0 | } |
1134 | 0 | } else if kind == KIND_ARC { |
1135 | 0 | unsafe { release_shared(self.data) }; |
1136 | 0 | } |
1137 | 0 | } |
1138 | | } |
1139 | | |
1140 | | impl Buf for BytesMut { |
1141 | | #[inline] |
1142 | 0 | fn remaining(&self) -> usize { |
1143 | 0 | self.len() |
1144 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_impl::Buf>::remaining Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_impl::Buf>::remaining |
1145 | | |
1146 | | #[inline] |
1147 | 0 | fn chunk(&self) -> &[u8] { |
1148 | 0 | self.as_slice() |
1149 | 0 | } |
1150 | | |
1151 | | #[inline] |
1152 | 0 | fn advance(&mut self, cnt: usize) { |
1153 | 0 | assert!( |
1154 | 0 | cnt <= self.remaining(), |
1155 | 0 | "cannot advance past `remaining`: {:?} <= {:?}", |
1156 | 0 | cnt, |
1157 | 0 | self.remaining(), |
1158 | | ); |
1159 | 0 | unsafe { |
1160 | 0 | // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that |
1161 | 0 | // `self.remaining()` <= `self.cap`. |
1162 | 0 | self.advance_unchecked(cnt); |
1163 | 0 | } |
1164 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_impl::Buf>::advance Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_impl::Buf>::advance |
1165 | | |
1166 | 0 | fn copy_to_bytes(&mut self, len: usize) -> Bytes { |
1167 | 0 | self.split_to(len).freeze() |
1168 | 0 | } |
1169 | | } |
1170 | | |
1171 | | unsafe impl BufMut for BytesMut { |
1172 | | #[inline] |
1173 | 0 | fn remaining_mut(&self) -> usize { |
1174 | 0 | usize::MAX - self.len() |
1175 | 0 | } |
1176 | | |
1177 | | #[inline] |
1178 | 0 | unsafe fn advance_mut(&mut self, cnt: usize) { |
1179 | 0 | let remaining = self.cap - self.len(); |
1180 | 0 | if cnt > remaining { |
1181 | 0 | super::panic_advance(&TryGetError { |
1182 | 0 | requested: cnt, |
1183 | 0 | available: remaining, |
1184 | 0 | }); |
1185 | 0 | } |
1186 | 0 | // Addition won't overflow since it is at most `self.cap`. |
1187 | 0 | self.len = self.len() + cnt; |
1188 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_mut::BufMut>::advance_mut Unexecuted instantiation: <bytes::bytes_mut::BytesMut as bytes::buf::buf_mut::BufMut>::advance_mut |
1189 | | |
1190 | | #[inline] |
1191 | 0 | fn chunk_mut(&mut self) -> &mut UninitSlice { |
1192 | 0 | if self.capacity() == self.len() { |
1193 | 0 | self.reserve(64); |
1194 | 0 | } |
1195 | 0 | self.spare_capacity_mut().into() |
1196 | 0 | } |
1197 | | |
1198 | | // Specialize these methods so they can skip checking `remaining_mut` |
1199 | | // and `advance_mut`. |
1200 | | |
1201 | 0 | fn put<T: Buf>(&mut self, mut src: T) |
1202 | 0 | where |
1203 | 0 | Self: Sized, |
1204 | 0 | { |
1205 | 0 | while src.has_remaining() { |
1206 | 0 | let s = src.chunk(); |
1207 | 0 | let l = s.len(); |
1208 | 0 | self.extend_from_slice(s); |
1209 | 0 | src.advance(l); |
1210 | 0 | } |
1211 | 0 | } |
1212 | | |
1213 | 0 | fn put_slice(&mut self, src: &[u8]) { |
1214 | 0 | self.extend_from_slice(src); |
1215 | 0 | } |
1216 | | |
1217 | 0 | fn put_bytes(&mut self, val: u8, cnt: usize) { |
1218 | 0 | self.reserve(cnt); |
1219 | 0 | unsafe { |
1220 | 0 | let dst = self.spare_capacity_mut(); |
1221 | 0 | // Reserved above |
1222 | 0 | debug_assert!(dst.len() >= cnt); |
1223 | | |
1224 | 0 | ptr::write_bytes(dst.as_mut_ptr(), val, cnt); |
1225 | 0 |
|
1226 | 0 | self.advance_mut(cnt); |
1227 | 0 | } |
1228 | 0 | } |
1229 | | } |
1230 | | |
1231 | | impl AsRef<[u8]> for BytesMut { |
1232 | | #[inline] |
1233 | 0 | fn as_ref(&self) -> &[u8] { |
1234 | 0 | self.as_slice() |
1235 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::convert::AsRef<[u8]>>::as_ref Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::convert::AsRef<[u8]>>::as_ref |
1236 | | } |
1237 | | |
1238 | | impl Deref for BytesMut { |
1239 | | type Target = [u8]; |
1240 | | |
1241 | | #[inline] |
1242 | 0 | fn deref(&self) -> &[u8] { |
1243 | 0 | self.as_ref() |
1244 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::ops::deref::Deref>::deref Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::ops::deref::Deref>::deref |
1245 | | } |
1246 | | |
1247 | | impl AsMut<[u8]> for BytesMut { |
1248 | | #[inline] |
1249 | 0 | fn as_mut(&mut self) -> &mut [u8] { |
1250 | 0 | self.as_slice_mut() |
1251 | 0 | } |
1252 | | } |
1253 | | |
1254 | | impl DerefMut for BytesMut { |
1255 | | #[inline] |
1256 | 0 | fn deref_mut(&mut self) -> &mut [u8] { |
1257 | 0 | self.as_mut() |
1258 | 0 | } |
1259 | | } |
1260 | | |
1261 | | impl<'a> From<&'a [u8]> for BytesMut { |
1262 | 0 | fn from(src: &'a [u8]) -> BytesMut { |
1263 | 0 | BytesMut::from_vec(src.to_vec()) |
1264 | 0 | } |
1265 | | } |
1266 | | |
1267 | | impl<'a> From<&'a str> for BytesMut { |
1268 | 0 | fn from(src: &'a str) -> BytesMut { |
1269 | 0 | BytesMut::from(src.as_bytes()) |
1270 | 0 | } |
1271 | | } |
1272 | | |
1273 | | impl From<BytesMut> for Bytes { |
1274 | 0 | fn from(src: BytesMut) -> Bytes { |
1275 | 0 | src.freeze() |
1276 | 0 | } |
1277 | | } |
1278 | | |
1279 | | impl PartialEq for BytesMut { |
1280 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1281 | 0 | self.as_slice() == other.as_slice() |
1282 | 0 | } |
1283 | | } |
1284 | | |
1285 | | impl PartialOrd for BytesMut { |
1286 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1287 | 0 | self.as_slice().partial_cmp(other.as_slice()) |
1288 | 0 | } |
1289 | | } |
1290 | | |
1291 | | impl Ord for BytesMut { |
1292 | 0 | fn cmp(&self, other: &BytesMut) -> cmp::Ordering { |
1293 | 0 | self.as_slice().cmp(other.as_slice()) |
1294 | 0 | } |
1295 | | } |
1296 | | |
1297 | | impl Eq for BytesMut {} |
1298 | | |
1299 | | impl Default for BytesMut { |
1300 | | #[inline] |
1301 | 0 | fn default() -> BytesMut { |
1302 | 0 | BytesMut::new() |
1303 | 0 | } |
1304 | | } |
1305 | | |
1306 | | impl hash::Hash for BytesMut { |
1307 | 0 | fn hash<H>(&self, state: &mut H) |
1308 | 0 | where |
1309 | 0 | H: hash::Hasher, |
1310 | 0 | { |
1311 | 0 | let s: &[u8] = self.as_ref(); |
1312 | 0 | s.hash(state); |
1313 | 0 | } |
1314 | | } |
1315 | | |
1316 | | impl Borrow<[u8]> for BytesMut { |
1317 | 0 | fn borrow(&self) -> &[u8] { |
1318 | 0 | self.as_ref() |
1319 | 0 | } |
1320 | | } |
1321 | | |
1322 | | impl BorrowMut<[u8]> for BytesMut { |
1323 | 0 | fn borrow_mut(&mut self) -> &mut [u8] { |
1324 | 0 | self.as_mut() |
1325 | 0 | } |
1326 | | } |
1327 | | |
1328 | | impl fmt::Write for BytesMut { |
1329 | | #[inline] |
1330 | 0 | fn write_str(&mut self, s: &str) -> fmt::Result { |
1331 | 0 | if self.remaining_mut() >= s.len() { |
1332 | 0 | self.put_slice(s.as_bytes()); |
1333 | 0 | Ok(()) |
1334 | | } else { |
1335 | 0 | Err(fmt::Error) |
1336 | | } |
1337 | 0 | } |
1338 | | |
1339 | | #[inline] |
1340 | 0 | fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { |
1341 | 0 | fmt::write(self, args) |
1342 | 0 | } |
1343 | | } |
1344 | | |
1345 | | impl Clone for BytesMut { |
1346 | 0 | fn clone(&self) -> BytesMut { |
1347 | 0 | BytesMut::from(&self[..]) |
1348 | 0 | } |
1349 | | } |
1350 | | |
1351 | | impl IntoIterator for BytesMut { |
1352 | | type Item = u8; |
1353 | | type IntoIter = IntoIter<BytesMut>; |
1354 | | |
1355 | 0 | fn into_iter(self) -> Self::IntoIter { |
1356 | 0 | IntoIter::new(self) |
1357 | 0 | } |
1358 | | } |
1359 | | |
1360 | | impl<'a> IntoIterator for &'a BytesMut { |
1361 | | type Item = &'a u8; |
1362 | | type IntoIter = core::slice::Iter<'a, u8>; |
1363 | | |
1364 | 0 | fn into_iter(self) -> Self::IntoIter { |
1365 | 0 | self.as_ref().iter() |
1366 | 0 | } |
1367 | | } |
1368 | | |
1369 | | impl Extend<u8> for BytesMut { |
1370 | 0 | fn extend<T>(&mut self, iter: T) |
1371 | 0 | where |
1372 | 0 | T: IntoIterator<Item = u8>, |
1373 | 0 | { |
1374 | 0 | let iter = iter.into_iter(); |
1375 | 0 |
|
1376 | 0 | let (lower, _) = iter.size_hint(); |
1377 | 0 | self.reserve(lower); |
1378 | | |
1379 | | // TODO: optimize |
1380 | | // 1. If self.kind() == KIND_VEC, use Vec::extend |
1381 | 0 | for b in iter { |
1382 | 0 | self.put_u8(b); |
1383 | 0 | } |
1384 | 0 | } |
1385 | | } |
1386 | | |
1387 | | impl<'a> Extend<&'a u8> for BytesMut { |
1388 | 0 | fn extend<T>(&mut self, iter: T) |
1389 | 0 | where |
1390 | 0 | T: IntoIterator<Item = &'a u8>, |
1391 | 0 | { |
1392 | 0 | self.extend(iter.into_iter().copied()) |
1393 | 0 | } |
1394 | | } |
1395 | | |
1396 | | impl Extend<Bytes> for BytesMut { |
1397 | 0 | fn extend<T>(&mut self, iter: T) |
1398 | 0 | where |
1399 | 0 | T: IntoIterator<Item = Bytes>, |
1400 | 0 | { |
1401 | 0 | for bytes in iter { |
1402 | 0 | self.extend_from_slice(&bytes) |
1403 | | } |
1404 | 0 | } |
1405 | | } |
1406 | | |
1407 | | impl FromIterator<u8> for BytesMut { |
1408 | 0 | fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { |
1409 | 0 | BytesMut::from_vec(Vec::from_iter(into_iter)) |
1410 | 0 | } |
1411 | | } |
1412 | | |
1413 | | impl<'a> FromIterator<&'a u8> for BytesMut { |
1414 | 0 | fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self { |
1415 | 0 | BytesMut::from_iter(into_iter.into_iter().copied()) |
1416 | 0 | } |
1417 | | } |
1418 | | |
1419 | | /* |
1420 | | * |
1421 | | * ===== Inner ===== |
1422 | | * |
1423 | | */ |
1424 | | |
1425 | 0 | unsafe fn increment_shared(ptr: *mut Shared) { |
1426 | 0 | let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed); |
1427 | 0 |
|
1428 | 0 | if old_size > isize::MAX as usize { |
1429 | 0 | crate::abort(); |
1430 | 0 | } |
1431 | 0 | } |
1432 | | |
1433 | 0 | unsafe fn release_shared(ptr: *mut Shared) { |
1434 | 0 | // `Shared` storage... follow the drop steps from Arc. |
1435 | 0 | if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 { |
1436 | 0 | return; |
1437 | 0 | } |
1438 | 0 |
|
1439 | 0 | // This fence is needed to prevent reordering of use of the data and |
1440 | 0 | // deletion of the data. Because it is marked `Release`, the decreasing |
1441 | 0 | // of the reference count synchronizes with this `Acquire` fence. This |
1442 | 0 | // means that use of the data happens before decreasing the reference |
1443 | 0 | // count, which happens before this fence, which happens before the |
1444 | 0 | // deletion of the data. |
1445 | 0 | // |
1446 | 0 | // As explained in the [Boost documentation][1], |
1447 | 0 | // |
1448 | 0 | // > It is important to enforce any possible access to the object in one |
1449 | 0 | // > thread (through an existing reference) to *happen before* deleting |
1450 | 0 | // > the object in a different thread. This is achieved by a "release" |
1451 | 0 | // > operation after dropping a reference (any access to the object |
1452 | 0 | // > through this reference must obviously happened before), and an |
1453 | 0 | // > "acquire" operation before deleting the object. |
1454 | 0 | // |
1455 | 0 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
1456 | 0 | // |
1457 | 0 | // Thread sanitizer does not support atomic fences. Use an atomic load |
1458 | 0 | // instead. |
1459 | 0 | (*ptr).ref_count.load(Ordering::Acquire); |
1460 | 0 |
|
1461 | 0 | // Drop the data |
1462 | 0 | drop(Box::from_raw(ptr)); |
1463 | 0 | } |
1464 | | |
1465 | | impl Shared { |
1466 | 0 | fn is_unique(&self) -> bool { |
1467 | 0 | // The goal is to check if the current handle is the only handle |
1468 | 0 | // that currently has access to the buffer. This is done by |
1469 | 0 | // checking if the `ref_count` is currently 1. |
1470 | 0 | // |
1471 | 0 | // The `Acquire` ordering synchronizes with the `Release` as |
1472 | 0 | // part of the `fetch_sub` in `release_shared`. The `fetch_sub` |
1473 | 0 | // operation guarantees that any mutations done in other threads |
1474 | 0 | // are ordered before the `ref_count` is decremented. As such, |
1475 | 0 | // this `Acquire` will guarantee that those mutations are |
1476 | 0 | // visible to the current thread. |
1477 | 0 | self.ref_count.load(Ordering::Acquire) == 1 |
1478 | 0 | } |
1479 | | } |
1480 | | |
1481 | | #[inline] |
1482 | 0 | fn original_capacity_to_repr(cap: usize) -> usize { |
1483 | 0 | let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); |
1484 | 0 | cmp::min( |
1485 | 0 | width, |
1486 | 0 | MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH, |
1487 | 0 | ) |
1488 | 0 | } Unexecuted instantiation: bytes::bytes_mut::original_capacity_to_repr Unexecuted instantiation: bytes::bytes_mut::original_capacity_to_repr |
1489 | | |
1490 | 0 | fn original_capacity_from_repr(repr: usize) -> usize { |
1491 | 0 | if repr == 0 { |
1492 | 0 | return 0; |
1493 | 0 | } |
1494 | 0 |
|
1495 | 0 | 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) |
1496 | 0 | } |
1497 | | |
1498 | | #[cfg(test)] |
1499 | | mod tests { |
1500 | | use super::*; |
1501 | | |
1502 | | #[test] |
1503 | | fn test_original_capacity_to_repr() { |
1504 | | assert_eq!(original_capacity_to_repr(0), 0); |
1505 | | |
1506 | | let max_width = 32; |
1507 | | |
1508 | | for width in 1..(max_width + 1) { |
1509 | | let cap = 1 << width - 1; |
1510 | | |
1511 | | let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { |
1512 | | 0 |
1513 | | } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { |
1514 | | width - MIN_ORIGINAL_CAPACITY_WIDTH |
1515 | | } else { |
1516 | | MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH |
1517 | | }; |
1518 | | |
1519 | | assert_eq!(original_capacity_to_repr(cap), expected); |
1520 | | |
1521 | | if width > 1 { |
1522 | | assert_eq!(original_capacity_to_repr(cap + 1), expected); |
1523 | | } |
1524 | | |
1525 | | // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below |
1526 | | if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { |
1527 | | assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); |
1528 | | assert_eq!(original_capacity_to_repr(cap + 76), expected); |
1529 | | } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { |
1530 | | assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); |
1531 | | assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); |
1532 | | } |
1533 | | } |
1534 | | } |
1535 | | |
1536 | | #[test] |
1537 | | fn test_original_capacity_from_repr() { |
1538 | | assert_eq!(0, original_capacity_from_repr(0)); |
1539 | | |
1540 | | let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; |
1541 | | |
1542 | | assert_eq!(min_cap, original_capacity_from_repr(1)); |
1543 | | assert_eq!(min_cap * 2, original_capacity_from_repr(2)); |
1544 | | assert_eq!(min_cap * 4, original_capacity_from_repr(3)); |
1545 | | assert_eq!(min_cap * 8, original_capacity_from_repr(4)); |
1546 | | assert_eq!(min_cap * 16, original_capacity_from_repr(5)); |
1547 | | assert_eq!(min_cap * 32, original_capacity_from_repr(6)); |
1548 | | assert_eq!(min_cap * 64, original_capacity_from_repr(7)); |
1549 | | } |
1550 | | } |
1551 | | |
1552 | | unsafe impl Send for BytesMut {} |
1553 | | unsafe impl Sync for BytesMut {} |
1554 | | |
1555 | | /* |
1556 | | * |
1557 | | * ===== PartialEq / PartialOrd ===== |
1558 | | * |
1559 | | */ |
1560 | | |
1561 | | impl PartialEq<[u8]> for BytesMut { |
1562 | 0 | fn eq(&self, other: &[u8]) -> bool { |
1563 | 0 | &**self == other |
1564 | 0 | } |
1565 | | } |
1566 | | |
1567 | | impl PartialOrd<[u8]> for BytesMut { |
1568 | 0 | fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { |
1569 | 0 | (**self).partial_cmp(other) |
1570 | 0 | } |
1571 | | } |
1572 | | |
1573 | | impl PartialEq<BytesMut> for [u8] { |
1574 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1575 | 0 | *other == *self |
1576 | 0 | } |
1577 | | } |
1578 | | |
1579 | | impl PartialOrd<BytesMut> for [u8] { |
1580 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1581 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) |
1582 | 0 | } |
1583 | | } |
1584 | | |
1585 | | impl PartialEq<str> for BytesMut { |
1586 | 0 | fn eq(&self, other: &str) -> bool { |
1587 | 0 | &**self == other.as_bytes() |
1588 | 0 | } |
1589 | | } |
1590 | | |
1591 | | impl PartialOrd<str> for BytesMut { |
1592 | 0 | fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { |
1593 | 0 | (**self).partial_cmp(other.as_bytes()) |
1594 | 0 | } |
1595 | | } |
1596 | | |
1597 | | impl PartialEq<BytesMut> for str { |
1598 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1599 | 0 | *other == *self |
1600 | 0 | } |
1601 | | } |
1602 | | |
1603 | | impl PartialOrd<BytesMut> for str { |
1604 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1605 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) |
1606 | 0 | } |
1607 | | } |
1608 | | |
1609 | | impl PartialEq<Vec<u8>> for BytesMut { |
1610 | 0 | fn eq(&self, other: &Vec<u8>) -> bool { |
1611 | 0 | *self == other[..] |
1612 | 0 | } |
1613 | | } |
1614 | | |
1615 | | impl PartialOrd<Vec<u8>> for BytesMut { |
1616 | 0 | fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { |
1617 | 0 | (**self).partial_cmp(&other[..]) |
1618 | 0 | } |
1619 | | } |
1620 | | |
1621 | | impl PartialEq<BytesMut> for Vec<u8> { |
1622 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1623 | 0 | *other == *self |
1624 | 0 | } |
1625 | | } |
1626 | | |
1627 | | impl PartialOrd<BytesMut> for Vec<u8> { |
1628 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1629 | 0 | other.partial_cmp(self) |
1630 | 0 | } |
1631 | | } |
1632 | | |
1633 | | impl PartialEq<String> for BytesMut { |
1634 | 0 | fn eq(&self, other: &String) -> bool { |
1635 | 0 | *self == other[..] |
1636 | 0 | } |
1637 | | } |
1638 | | |
1639 | | impl PartialOrd<String> for BytesMut { |
1640 | 0 | fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { |
1641 | 0 | (**self).partial_cmp(other.as_bytes()) |
1642 | 0 | } |
1643 | | } |
1644 | | |
1645 | | impl PartialEq<BytesMut> for String { |
1646 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1647 | 0 | *other == *self |
1648 | 0 | } |
1649 | | } |
1650 | | |
1651 | | impl PartialOrd<BytesMut> for String { |
1652 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1653 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) |
1654 | 0 | } |
1655 | | } |
1656 | | |
1657 | | impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut |
1658 | | where |
1659 | | BytesMut: PartialEq<T>, |
1660 | | { |
1661 | 0 | fn eq(&self, other: &&'a T) -> bool { |
1662 | 0 | *self == **other |
1663 | 0 | } Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::cmp::PartialEq<&[u8]>>::eq Unexecuted instantiation: <bytes::bytes_mut::BytesMut as core::cmp::PartialEq<&str>>::eq |
1664 | | } |
1665 | | |
1666 | | impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut |
1667 | | where |
1668 | | BytesMut: PartialOrd<T>, |
1669 | | { |
1670 | 0 | fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { |
1671 | 0 | self.partial_cmp(*other) |
1672 | 0 | } |
1673 | | } |
1674 | | |
1675 | | impl PartialEq<BytesMut> for &[u8] { |
1676 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1677 | 0 | *other == *self |
1678 | 0 | } |
1679 | | } |
1680 | | |
1681 | | impl PartialOrd<BytesMut> for &[u8] { |
1682 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1683 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) |
1684 | 0 | } |
1685 | | } |
1686 | | |
1687 | | impl PartialEq<BytesMut> for &str { |
1688 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1689 | 0 | *other == *self |
1690 | 0 | } |
1691 | | } |
1692 | | |
1693 | | impl PartialOrd<BytesMut> for &str { |
1694 | 0 | fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { |
1695 | 0 | other.partial_cmp(self) |
1696 | 0 | } |
1697 | | } |
1698 | | |
1699 | | impl PartialEq<BytesMut> for Bytes { |
1700 | 0 | fn eq(&self, other: &BytesMut) -> bool { |
1701 | 0 | other[..] == self[..] |
1702 | 0 | } |
1703 | | } |
1704 | | |
1705 | | impl PartialEq<Bytes> for BytesMut { |
1706 | 0 | fn eq(&self, other: &Bytes) -> bool { |
1707 | 0 | other[..] == self[..] |
1708 | 0 | } |
1709 | | } |
1710 | | |
1711 | | impl From<BytesMut> for Vec<u8> { |
1712 | 0 | fn from(bytes: BytesMut) -> Self { |
1713 | 0 | let kind = bytes.kind(); |
1714 | 0 | let bytes = ManuallyDrop::new(bytes); |
1715 | | |
1716 | 0 | let mut vec = if kind == KIND_VEC { |
1717 | | unsafe { |
1718 | 0 | let off = bytes.get_vec_pos(); |
1719 | 0 | rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off) |
1720 | | } |
1721 | | } else { |
1722 | 0 | let shared = bytes.data as *mut Shared; |
1723 | 0 |
|
1724 | 0 | if unsafe { (*shared).is_unique() } { |
1725 | 0 | let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new()); |
1726 | 0 |
|
1727 | 0 | unsafe { release_shared(shared) }; |
1728 | 0 |
|
1729 | 0 | vec |
1730 | | } else { |
1731 | 0 | return ManuallyDrop::into_inner(bytes).deref().to_vec(); |
1732 | | } |
1733 | | }; |
1734 | | |
1735 | 0 | let len = bytes.len; |
1736 | 0 |
|
1737 | 0 | unsafe { |
1738 | 0 | ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len); |
1739 | 0 | vec.set_len(len); |
1740 | 0 | } |
1741 | 0 |
|
1742 | 0 | vec |
1743 | 0 | } |
1744 | | } |
1745 | | |
1746 | | #[inline] |
1747 | 0 | fn vptr(ptr: *mut u8) -> NonNull<u8> { |
1748 | 0 | if cfg!(debug_assertions) { |
1749 | 0 | NonNull::new(ptr).expect("Vec pointer should be non-null") |
1750 | | } else { |
1751 | 0 | unsafe { NonNull::new_unchecked(ptr) } |
1752 | | } |
1753 | 0 | } Unexecuted instantiation: bytes::bytes_mut::vptr Unexecuted instantiation: bytes::bytes_mut::vptr |
1754 | | |
1755 | | /// Returns a dangling pointer with the given address. This is used to store |
1756 | | /// integer data in pointer fields. |
1757 | | /// |
1758 | | /// It is equivalent to `addr as *mut T`, but this fails on miri when strict |
1759 | | /// provenance checking is enabled. |
1760 | | #[inline] |
1761 | 0 | fn invalid_ptr<T>(addr: usize) -> *mut T { |
1762 | 0 | let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr); |
1763 | 0 | debug_assert_eq!(ptr as usize, addr); |
1764 | 0 | ptr.cast::<T>() |
1765 | 0 | } |
1766 | | |
1767 | 0 | unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> { |
1768 | 0 | let ptr = ptr.sub(off); |
1769 | 0 | len += off; |
1770 | 0 | cap += off; |
1771 | 0 |
|
1772 | 0 | Vec::from_raw_parts(ptr, len, cap) |
1773 | 0 | } |
1774 | | |
1775 | | // ===== impl SharedVtable ===== |
1776 | | |
1777 | | static SHARED_VTABLE: Vtable = Vtable { |
1778 | | clone: shared_v_clone, |
1779 | | to_vec: shared_v_to_vec, |
1780 | | to_mut: shared_v_to_mut, |
1781 | | is_unique: shared_v_is_unique, |
1782 | | drop: shared_v_drop, |
1783 | | }; |
1784 | | |
1785 | 0 | unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1786 | 0 | let shared = data.load(Ordering::Relaxed) as *mut Shared; |
1787 | 0 | increment_shared(shared); |
1788 | 0 |
|
1789 | 0 | let data = AtomicPtr::new(shared as *mut ()); |
1790 | 0 | Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) |
1791 | 0 | } |
1792 | | |
1793 | 0 | unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1794 | 0 | let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); |
1795 | 0 |
|
1796 | 0 | if (*shared).is_unique() { |
1797 | 0 | let shared = &mut *shared; |
1798 | 0 |
|
1799 | 0 | // Drop shared |
1800 | 0 | let mut vec = mem::replace(&mut shared.vec, Vec::new()); |
1801 | 0 | release_shared(shared); |
1802 | 0 |
|
1803 | 0 | // Copy back buffer |
1804 | 0 | ptr::copy(ptr, vec.as_mut_ptr(), len); |
1805 | 0 | vec.set_len(len); |
1806 | 0 |
|
1807 | 0 | vec |
1808 | | } else { |
1809 | 0 | let v = slice::from_raw_parts(ptr, len).to_vec(); |
1810 | 0 | release_shared(shared); |
1811 | 0 | v |
1812 | | } |
1813 | 0 | } |
1814 | | |
1815 | 0 | unsafe fn shared_v_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1816 | 0 | let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); |
1817 | 0 |
|
1818 | 0 | if (*shared).is_unique() { |
1819 | 0 | let shared = &mut *shared; |
1820 | 0 |
|
1821 | 0 | // The capacity is always the original capacity of the buffer |
1822 | 0 | // minus the offset from the start of the buffer |
1823 | 0 | let v = &mut shared.vec; |
1824 | 0 | let v_capacity = v.capacity(); |
1825 | 0 | let v_ptr = v.as_mut_ptr(); |
1826 | 0 | let offset = offset_from(ptr as *mut u8, v_ptr); |
1827 | 0 | let cap = v_capacity - offset; |
1828 | 0 |
|
1829 | 0 | let ptr = vptr(ptr as *mut u8); |
1830 | 0 |
|
1831 | 0 | BytesMut { |
1832 | 0 | ptr, |
1833 | 0 | len, |
1834 | 0 | cap, |
1835 | 0 | data: shared, |
1836 | 0 | } |
1837 | | } else { |
1838 | 0 | let v = slice::from_raw_parts(ptr, len).to_vec(); |
1839 | 0 | release_shared(shared); |
1840 | 0 | BytesMut::from_vec(v) |
1841 | | } |
1842 | 0 | } |
1843 | | |
1844 | 0 | unsafe fn shared_v_is_unique(data: &AtomicPtr<()>) -> bool { |
1845 | 0 | let shared = data.load(Ordering::Acquire); |
1846 | 0 | let ref_count = (*shared.cast::<Shared>()).ref_count.load(Ordering::Relaxed); |
1847 | 0 | ref_count == 1 |
1848 | 0 | } |
1849 | | |
1850 | 0 | unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { |
1851 | 0 | data.with_mut(|shared| { |
1852 | 0 | release_shared(*shared as *mut Shared); |
1853 | 0 | }); |
1854 | 0 | } |
1855 | | |
1856 | | // compile-fails |
1857 | | |
1858 | | /// ```compile_fail |
1859 | | /// use bytes::BytesMut; |
1860 | | /// #[deny(unused_must_use)] |
1861 | | /// { |
1862 | | /// let mut b1 = BytesMut::from("hello world"); |
1863 | | /// b1.split_to(6); |
1864 | | /// } |
1865 | | /// ``` |
1866 | 0 | fn _split_to_must_use() {} |
1867 | | |
1868 | | /// ```compile_fail |
1869 | | /// use bytes::BytesMut; |
1870 | | /// #[deny(unused_must_use)] |
1871 | | /// { |
1872 | | /// let mut b1 = BytesMut::from("hello world"); |
1873 | | /// b1.split_off(6); |
1874 | | /// } |
1875 | | /// ``` |
1876 | 0 | fn _split_off_must_use() {} |
1877 | | |
1878 | | /// ```compile_fail |
1879 | | /// use bytes::BytesMut; |
1880 | | /// #[deny(unused_must_use)] |
1881 | | /// { |
1882 | | /// let mut b1 = BytesMut::from("hello world"); |
1883 | | /// b1.split(); |
1884 | | /// } |
1885 | | /// ``` |
1886 | 0 | fn _split_must_use() {} |
1887 | | |
1888 | | // fuzz tests |
1889 | | #[cfg(all(test, loom))] |
1890 | | mod fuzz { |
1891 | | use loom::sync::Arc; |
1892 | | use loom::thread; |
1893 | | |
1894 | | use super::BytesMut; |
1895 | | use crate::Bytes; |
1896 | | |
1897 | | #[test] |
1898 | | fn bytes_mut_cloning_frozen() { |
1899 | | loom::model(|| { |
1900 | | let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze(); |
1901 | | let addr = a.as_ptr() as usize; |
1902 | | |
1903 | | // test the Bytes::clone is Sync by putting it in an Arc |
1904 | | let a1 = Arc::new(a); |
1905 | | let a2 = a1.clone(); |
1906 | | |
1907 | | let t1 = thread::spawn(move || { |
1908 | | let b: Bytes = (*a1).clone(); |
1909 | | assert_eq!(b.as_ptr() as usize, addr); |
1910 | | }); |
1911 | | |
1912 | | let t2 = thread::spawn(move || { |
1913 | | let b: Bytes = (*a2).clone(); |
1914 | | assert_eq!(b.as_ptr() as usize, addr); |
1915 | | }); |
1916 | | |
1917 | | t1.join().unwrap(); |
1918 | | t2.join().unwrap(); |
1919 | | }); |
1920 | | } |
1921 | | } |