/rust/registry/src/index.crates.io-6f17d22bba15001f/bytes-1.6.1/src/bytes.rs
| Line | Count | Source | 
| 1 |  | use core::iter::FromIterator; | 
| 2 |  | use core::ops::{Deref, RangeBounds}; | 
| 3 |  | use core::{cmp, fmt, hash, mem, ptr, slice, usize}; | 
| 4 |  |  | 
| 5 |  | use alloc::{ | 
| 6 |  |     alloc::{dealloc, Layout}, | 
| 7 |  |     borrow::Borrow, | 
| 8 |  |     boxed::Box, | 
| 9 |  |     string::String, | 
| 10 |  |     vec::Vec, | 
| 11 |  | }; | 
| 12 |  |  | 
| 13 |  | use crate::buf::IntoIter; | 
| 14 |  | #[allow(unused)] | 
| 15 |  | use crate::loom::sync::atomic::AtomicMut; | 
| 16 |  | use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; | 
| 17 |  | use crate::Buf; | 
| 18 |  |  | 
| 19 |  | /// A cheaply cloneable and sliceable chunk of contiguous memory. | 
| 20 |  | /// | 
| 21 |  | /// `Bytes` is an efficient container for storing and operating on contiguous | 
| 22 |  | /// slices of memory. It is intended for use primarily in networking code, but | 
| 23 |  | /// could have applications elsewhere as well. | 
| 24 |  | /// | 
| 25 |  | /// `Bytes` values facilitate zero-copy network programming by allowing multiple | 
| 26 |  | /// `Bytes` objects to point to the same underlying memory. | 
| 27 |  | /// | 
| 28 |  | /// `Bytes` does not have a single implementation. It is an interface, whose | 
| 29 |  | /// exact behavior is implemented through dynamic dispatch in several underlying | 
| 30 |  | /// implementations of `Bytes`. | 
| 31 |  | /// | 
| 32 |  | /// All `Bytes` implementations must fulfill the following requirements: | 
| 33 |  | /// - They are cheaply cloneable and thereby shareable between an unlimited number | 
| 34 |  | ///   of components, for example by modifying a reference count. | 
| 35 |  | /// - Instances can be sliced to refer to a subset of the original buffer. | 
| 36 |  | /// | 
| 37 |  | /// ``` | 
| 38 |  | /// use bytes::Bytes; | 
| 39 |  | /// | 
| 40 |  | /// let mut mem = Bytes::from("Hello world"); | 
| 41 |  | /// let a = mem.slice(0..5); | 
| 42 |  | /// | 
| 43 |  | /// assert_eq!(a, "Hello"); | 
| 44 |  | /// | 
| 45 |  | /// let b = mem.split_to(6); | 
| 46 |  | /// | 
| 47 |  | /// assert_eq!(mem, "world"); | 
| 48 |  | /// assert_eq!(b, "Hello "); | 
| 49 |  | /// ``` | 
| 50 |  | /// | 
| 51 |  | /// # Memory layout | 
| 52 |  | /// | 
| 53 |  | /// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used | 
| 54 |  | /// to track information about which segment of the underlying memory the | 
| 55 |  | /// `Bytes` handle has access to. | 
| 56 |  | /// | 
| 57 |  | /// `Bytes` keeps both a pointer to the shared state containing the full memory | 
| 58 |  | /// slice and a pointer to the start of the region visible to the handle. | 
| 59 |  | /// `Bytes` also tracks the length of its view into the memory. | 
| 60 |  | /// | 
| 61 |  | /// # Sharing | 
| 62 |  | /// | 
| 63 |  | /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define | 
| 64 |  | /// how sharing/cloning is implemented in detail. | 
| 65 |  | /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for | 
| 66 |  | /// cloning the backing storage in order to share it behind multiple `Bytes` | 
| 67 |  | /// instances. | 
| 68 |  | /// | 
| 69 |  | /// For `Bytes` implementations which refer to constant memory (e.g. created | 
| 70 |  | /// via `Bytes::from_static()`) the cloning implementation will be a no-op. | 
| 71 |  | /// | 
| 72 |  | /// For `Bytes` implementations which point to a reference counted shared storage | 
| 73 |  | /// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the | 
| 74 |  | /// reference count. | 
| 75 |  | /// | 
| 76 |  | /// Due to this mechanism, multiple `Bytes` instances may point to the same | 
| 77 |  | /// shared memory region. | 
| 78 |  | /// Each `Bytes` instance can point to different sections within that | 
| 79 |  | /// memory region, and `Bytes` instances may or may not have overlapping views | 
| 80 |  | /// into the memory. | 
| 81 |  | /// | 
| 82 |  | /// The following diagram visualizes a scenario where 2 `Bytes` instances make | 
| 83 |  | /// use of an `Arc`-based backing storage, and provide access to different views: | 
| 84 |  | /// | 
| 85 |  | /// ```text | 
| 86 |  | /// | 
| 87 |  | ///    Arc ptrs                   ┌─────────┐ | 
| 88 |  | ///    ________________________ / │ Bytes 2 │ | 
| 89 |  | ///   /                           └─────────┘ | 
| 90 |  | ///  /          ┌───────────┐     |         | | 
| 91 |  | /// |_________/ │  Bytes 1  │     |         | | 
| 92 |  | /// |           └───────────┘     |         | | 
| 93 |  | /// |           |           | ___/ data     | tail | 
| 94 |  | /// |      data |      tail |/              | | 
| 95 |  | /// v           v           v               v | 
| 96 |  | /// ┌─────┬─────┬───────────┬───────────────┬─────┐ | 
| 97 |  | /// │ Arc │     │           │               │     │ | 
| 98 |  | /// └─────┴─────┴───────────┴───────────────┴─────┘ | 
| 99 |  | /// ``` | 
| 100 |  | pub struct Bytes { | 
| 101 |  |     ptr: *const u8, | 
| 102 |  |     len: usize, | 
| 103 |  |     // inlined "trait object" | 
| 104 |  |     data: AtomicPtr<()>, | 
| 105 |  |     vtable: &'static Vtable, | 
| 106 |  | } | 
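
To make the layout and sharing story above concrete, here is a small, self-contained sketch using only the public `bytes` API: two handles cheaply share one allocation while exposing different views of it, much like the diagram above.

```rust
use bytes::Bytes;

fn main() {
    // One heap allocation ends up shared by all three handles.
    let full = Bytes::from(b"abcdef".to_vec());
    let head = full.slice(0..3); // view of "abc"
    let tail = full.slice(3..);  // view of "def"

    // The clones alias the same underlying memory region;
    // only the (ptr, len) view differs per handle.
    assert_eq!(head.as_ptr(), full.as_ptr());
    assert_eq!(tail.as_ptr(), full[3..].as_ptr());
    assert_eq!(&head[..], b"abc");
    assert_eq!(&tail[..], b"def");
}
```
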
| 107 |  |  | 
| 108 |  | pub(crate) struct Vtable { | 
| 109 |  |     /// fn(data, ptr, len) | 
| 110 |  |     pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, | 
| 111 |  |     /// fn(data, ptr, len) | 
| 112 |  |     /// | 
| 113 |  |     /// Consumes the `Bytes`, producing an owned `Vec<u8>` | 
| 114 |  |     pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>, | 
| 115 |  |     /// fn(data) | 
| 116 |  |     pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool, | 
| 117 |  |     /// fn(data, ptr, len) | 
| 118 |  |     pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), | 
| 119 |  | } | 
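
This `Vtable` is a hand-rolled trait object: a data word plus a `&'static` table of function pointers plays the role of `Box<dyn Trait>` while keeping `ptr`/`len` inline. A minimal standalone sketch of the same pattern (hypothetical names, not the crate's internals):

```rust
// A data word plus a static table of fn pointers stands in for a
// boxed trait object; dispatch goes through the stored pointer.
struct MiniVtable {
    describe: fn(data: usize) -> String,
}

struct Handle {
    data: usize,
    vtable: &'static MiniVtable,
}

fn describe_static(data: usize) -> String {
    format!("static payload: {}", data)
}

static STATIC_IMPL: MiniVtable = MiniVtable {
    describe: describe_static,
};

fn main() {
    let h = Handle { data: 42, vtable: &STATIC_IMPL };
    // Equivalent in spirit to `(self.vtable.clone)(&self.data, ...)`.
    assert_eq!((h.vtable.describe)(h.data), "static payload: 42");
}
```
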
| 120 |  |  | 
| 121 |  | impl Bytes { | 
| 122 |  |     /// Creates a new empty `Bytes`. | 
| 123 |  |     /// | 
| 124 |  |     /// This will not allocate and the returned `Bytes` handle will be empty. | 
| 125 |  |     /// | 
| 126 |  |     /// # Examples | 
| 127 |  |     /// | 
| 128 |  |     /// ``` | 
| 129 |  |     /// use bytes::Bytes; | 
| 130 |  |     /// | 
| 131 |  |     /// let b = Bytes::new(); | 
| 132 |  |     /// assert_eq!(&b[..], b""); | 
| 133 |  |     /// ``` | 
| 134 |  |     #[inline] | 
| 135 |  |     #[cfg(not(all(loom, test)))] | 
| 136 | 0 |     pub const fn new() -> Self { | 
| 137 | 0 |         // Make it a named const to work around | 
| 138 | 0 |         // "unsizing casts are not allowed in const fn" | 
| 139 | 0 |         const EMPTY: &[u8] = &[]; | 
| 140 | 0 |         Bytes::from_static(EMPTY) | 
| 141 | 0 |     } | 
| 142 |  |  | 
| 143 |  |     #[cfg(all(loom, test))] | 
| 144 |  |     pub fn new() -> Self { | 
| 145 |  |         const EMPTY: &[u8] = &[]; | 
| 146 |  |         Bytes::from_static(EMPTY) | 
| 147 |  |     } | 
| 148 |  |  | 
| 149 |  |     /// Creates a new `Bytes` from a static slice. | 
| 150 |  |     /// | 
| 151 |  |     /// The returned `Bytes` will point directly to the static slice. There is | 
| 152 |  |     /// no allocating or copying. | 
| 153 |  |     /// | 
| 154 |  |     /// # Examples | 
| 155 |  |     /// | 
| 156 |  |     /// ``` | 
| 157 |  |     /// use bytes::Bytes; | 
| 158 |  |     /// | 
| 159 |  |     /// let b = Bytes::from_static(b"hello"); | 
| 160 |  |     /// assert_eq!(&b[..], b"hello"); | 
| 161 |  |     /// ``` | 
| 162 |  |     #[inline] | 
| 163 |  |     #[cfg(not(all(loom, test)))] | 
| 164 | 0 |     pub const fn from_static(bytes: &'static [u8]) -> Self { | 
| 165 | 0 |         Bytes { | 
| 166 | 0 |             ptr: bytes.as_ptr(), | 
| 167 | 0 |             len: bytes.len(), | 
| 168 | 0 |             data: AtomicPtr::new(ptr::null_mut()), | 
| 169 | 0 |             vtable: &STATIC_VTABLE, | 
| 170 | 0 |         } | 
| 171 | 0 |     } | 
| 172 |  |  | 
| 173 |  |     #[cfg(all(loom, test))] | 
| 174 |  |     pub fn from_static(bytes: &'static [u8]) -> Self { | 
| 175 |  |         Bytes { | 
| 176 |  |             ptr: bytes.as_ptr(), | 
| 177 |  |             len: bytes.len(), | 
| 178 |  |             data: AtomicPtr::new(ptr::null_mut()), | 
| 179 |  |             vtable: &STATIC_VTABLE, | 
| 180 |  |         } | 
| 181 |  |     } | 
| 182 |  |  | 
| 183 |  |     /// Returns the number of bytes contained in this `Bytes`. | 
| 184 |  |     /// | 
| 185 |  |     /// # Examples | 
| 186 |  |     /// | 
| 187 |  |     /// ``` | 
| 188 |  |     /// use bytes::Bytes; | 
| 189 |  |     /// | 
| 190 |  |     /// let b = Bytes::from(&b"hello"[..]); | 
| 191 |  |     /// assert_eq!(b.len(), 5); | 
| 192 |  |     /// ``` | 
| 193 |  |     #[inline] | 
| 194 | 0 |     pub const fn len(&self) -> usize { | 
| 195 | 0 |         self.len | 
| 196 | 0 |     } | 
| 197 |  |  | 
| 198 |  |     /// Returns true if the `Bytes` has a length of 0. | 
| 199 |  |     /// | 
| 200 |  |     /// # Examples | 
| 201 |  |     /// | 
| 202 |  |     /// ``` | 
| 203 |  |     /// use bytes::Bytes; | 
| 204 |  |     /// | 
| 205 |  |     /// let b = Bytes::new(); | 
| 206 |  |     /// assert!(b.is_empty()); | 
| 207 |  |     /// ``` | 
| 208 |  |     #[inline] | 
| 209 | 0 |     pub const fn is_empty(&self) -> bool { | 
| 210 | 0 |         self.len == 0 | 
| 211 | 0 |     } | 
| 212 |  |  | 
| 213 |  |     /// Returns true if this is the only reference to the data. | 
| 214 |  |     /// | 
| 215 |  |     /// Always returns false if the data is backed by a static slice. | 
| 216 |  |     /// | 
| 217 |  |     /// The result of this method may be invalidated immediately if another | 
| 218 |  |     /// thread clones this value while this is being called. Ensure you have | 
| 219 |  |     /// unique access to this value (`&mut Bytes`) first if you need to be | 
| 220 |  |     /// certain the result is valid (i.e. for safety reasons). | 
| 221 |  |     /// # Examples | 
| 222 |  |     /// | 
| 223 |  |     /// ``` | 
| 224 |  |     /// use bytes::Bytes; | 
| 225 |  |     /// | 
| 226 |  |     /// let a = Bytes::from(vec![1, 2, 3]); | 
| 227 |  |     /// assert!(a.is_unique()); | 
| 228 |  |     /// let b = a.clone(); | 
| 229 |  |     /// assert!(!a.is_unique()); | 
| 230 |  |     /// ``` | 
| 231 | 0 |     pub fn is_unique(&self) -> bool { | 
| 232 | 0 |         unsafe { (self.vtable.is_unique)(&self.data) } | 
| 233 | 0 |     } | 
| 234 |  |  | 
| 235 |  |     /// Creates a new `Bytes` instance from the given slice by copying it. | 
| 236 | 0 |     pub fn copy_from_slice(data: &[u8]) -> Self { | 
| 237 | 0 |         data.to_vec().into() | 
| 238 | 0 |     } | 
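
A brief usage note: unlike `from_static`, `copy_from_slice` always copies into a fresh owned allocation, so the source buffer may be short-lived.

```rust
use bytes::Bytes;

fn main() {
    let tmp = [1u8, 2, 3]; // short-lived stack buffer
    // `b` owns its own copy of the data.
    let b = Bytes::copy_from_slice(&tmp);
    assert_eq!(&b[..], &[1, 2, 3]);
}
```
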
| 239 |  |  | 
| 240 |  |     /// Returns a slice of self for the provided range. | 
| 241 |  |     /// | 
| 242 |  |     /// This will increment the reference count for the underlying memory and | 
| 243 |  |     /// return a new `Bytes` handle set to the slice. | 
| 244 |  |     /// | 
| 245 |  |     /// This operation is `O(1)`. | 
| 246 |  |     /// | 
| 247 |  |     /// # Examples | 
| 248 |  |     /// | 
| 249 |  |     /// ``` | 
| 250 |  |     /// use bytes::Bytes; | 
| 251 |  |     /// | 
| 252 |  |     /// let a = Bytes::from(&b"hello world"[..]); | 
| 253 |  |     /// let b = a.slice(2..5); | 
| 254 |  |     /// | 
| 255 |  |     /// assert_eq!(&b[..], b"llo"); | 
| 256 |  |     /// ``` | 
| 257 |  |     /// | 
| 258 |  |     /// # Panics | 
| 259 |  |     /// | 
| 260 |  |     /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing | 
| 261 |  |     /// will panic. | 
| 262 | 0 |     pub fn slice(&self, range: impl RangeBounds<usize>) -> Self { | 
| 263 | 0 |         use core::ops::Bound; | 
| 264 | 0 |  | 
| 265 | 0 |         let len = self.len(); | 
| 266 |  |  | 
| 267 | 0 |         let begin = match range.start_bound() { | 
| 268 | 0 |             Bound::Included(&n) => n, | 
| 269 | 0 |             Bound::Excluded(&n) => n.checked_add(1).expect("out of range"), | 
| 270 | 0 |             Bound::Unbounded => 0, | 
| 271 |  |         }; | 
| 272 |  |  | 
| 273 | 0 |         let end = match range.end_bound() { | 
| 274 | 0 |             Bound::Included(&n) => n.checked_add(1).expect("out of range"), | 
| 275 | 0 |             Bound::Excluded(&n) => n, | 
| 276 | 0 |             Bound::Unbounded => len, | 
| 277 |  |         }; | 
| 278 |  |  | 
| 279 | 0 |         assert!( | 
| 280 | 0 |             begin <= end, | 
| 281 | 0 |             "range start must not be greater than end: {:?} <= {:?}", | 
| 282 |  |             begin, | 
| 283 |  |             end, | 
| 284 |  |         ); | 
| 285 | 0 |         assert!( | 
| 286 | 0 |             end <= len, | 
| 287 | 0 |             "range end out of bounds: {:?} <= {:?}", | 
| 288 |  |             end, | 
| 289 |  |             len, | 
| 290 |  |         ); | 
| 291 |  |  | 
| 292 | 0 |         if end == begin { | 
| 293 | 0 |             return Bytes::new(); | 
| 294 | 0 |         } | 
| 295 | 0 |  | 
| 296 | 0 |         let mut ret = self.clone(); | 
| 297 | 0 |  | 
| 298 | 0 |         ret.len = end - begin; | 
| 299 | 0 |         ret.ptr = unsafe { ret.ptr.add(begin) }; | 
| 300 | 0 |  | 
| 301 | 0 |         ret | 
| 302 | 0 |     } | 
| 303 |  |  | 
| 304 |  |     /// Returns a slice of self that is equivalent to the given `subset`. | 
| 305 |  |     /// | 
| 306 |  |     /// When processing a `Bytes` buffer with other tools, one often gets a | 
| 307 |  |     /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. | 
| 308 |  |     /// This function turns that `&[u8]` into another `Bytes`, as if one had | 
| 309 |  |     /// called `self.slice()` with the offsets that correspond to `subset`. | 
| 310 |  |     /// | 
| 311 |  |     /// This operation is `O(1)`. | 
| 312 |  |     /// | 
| 313 |  |     /// # Examples | 
| 314 |  |     /// | 
| 315 |  |     /// ``` | 
| 316 |  |     /// use bytes::Bytes; | 
| 317 |  |     /// | 
| 318 |  |     /// let bytes = Bytes::from(&b"012345678"[..]); | 
| 319 |  |     /// let as_slice = bytes.as_ref(); | 
| 320 |  |     /// let subset = &as_slice[2..6]; | 
| 321 |  |     /// let subslice = bytes.slice_ref(&subset); | 
| 322 |  |     /// assert_eq!(&subslice[..], b"2345"); | 
| 323 |  |     /// ``` | 
| 324 |  |     /// | 
| 325 |  |     /// # Panics | 
| 326 |  |     /// | 
| 327 |  |     /// Requires that the given `sub` slice is in fact contained within the | 
| 328 |  |     /// `Bytes` buffer; otherwise this function will panic. | 
| 329 | 0 |     pub fn slice_ref(&self, subset: &[u8]) -> Self { | 
| 330 | 0 |         // Empty slice and empty Bytes may have their pointers reset | 
| 331 | 0 |         // so explicitly allow empty slice to be a subslice of any slice. | 
| 332 | 0 |         if subset.is_empty() { | 
| 333 | 0 |             return Bytes::new(); | 
| 334 | 0 |         } | 
| 335 | 0 |  | 
| 336 | 0 |         let bytes_p = self.as_ptr() as usize; | 
| 337 | 0 |         let bytes_len = self.len(); | 
| 338 | 0 |  | 
| 339 | 0 |         let sub_p = subset.as_ptr() as usize; | 
| 340 | 0 |         let sub_len = subset.len(); | 
| 341 | 0 |  | 
| 342 | 0 |         assert!( | 
| 343 | 0 |             sub_p >= bytes_p, | 
| 344 | 0 |             "subset pointer ({:p}) is smaller than self pointer ({:p})", | 
| 345 | 0 |             subset.as_ptr(), | 
| 346 | 0 |             self.as_ptr(), | 
| 347 |  |         ); | 
| 348 | 0 |         assert!( | 
| 349 | 0 |             sub_p + sub_len <= bytes_p + bytes_len, | 
| 350 | 0 |             "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", | 
| 351 | 0 |             self.as_ptr(), | 
| 352 | 0 |             bytes_len, | 
| 353 | 0 |             subset.as_ptr(), | 
| 354 |  |             sub_len, | 
| 355 |  |         ); | 
| 356 |  |  | 
| 357 | 0 |         let sub_offset = sub_p - bytes_p; | 
| 358 | 0 |  | 
| 359 | 0 |         self.slice(sub_offset..(sub_offset + sub_len)) | 
| 360 | 0 |     } | 
| 361 |  |  | 
| 362 |  |     /// Splits the bytes into two at the given index. | 
| 363 |  |     /// | 
| 364 |  |     /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` | 
| 365 |  |     /// contains elements `[at, len)`. | 
| 366 |  |     /// | 
| 367 |  |     /// This is an `O(1)` operation that just increases the reference count and | 
| 368 |  |     /// sets a few indices. | 
| 369 |  |     /// | 
| 370 |  |     /// # Examples | 
| 371 |  |     /// | 
| 372 |  |     /// ``` | 
| 373 |  |     /// use bytes::Bytes; | 
| 374 |  |     /// | 
| 375 |  |     /// let mut a = Bytes::from(&b"hello world"[..]); | 
| 376 |  |     /// let b = a.split_off(5); | 
| 377 |  |     /// | 
| 378 |  |     /// assert_eq!(&a[..], b"hello"); | 
| 379 |  |     /// assert_eq!(&b[..], b" world"); | 
| 380 |  |     /// ``` | 
| 381 |  |     /// | 
| 382 |  |     /// # Panics | 
| 383 |  |     /// | 
| 384 |  |     /// Panics if `at > len`. | 
| 385 |  |     #[must_use = "consider Bytes::truncate if you don't need the other half"] | 
| 386 | 0 |     pub fn split_off(&mut self, at: usize) -> Self { | 
| 387 | 0 |         assert!( | 
| 388 | 0 |             at <= self.len(), | 
| 389 | 0 |             "split_off out of bounds: {:?} <= {:?}", | 
| 390 | 0 |             at, | 
| 391 | 0 |             self.len(), | 
| 392 |  |         ); | 
| 393 |  |  | 
| 394 | 0 |         if at == self.len() { | 
| 395 | 0 |             return Bytes::new(); | 
| 396 | 0 |         } | 
| 397 | 0 |  | 
| 398 | 0 |         if at == 0 { | 
| 399 | 0 |             return mem::replace(self, Bytes::new()); | 
| 400 | 0 |         } | 
| 401 | 0 |  | 
| 402 | 0 |         let mut ret = self.clone(); | 
| 403 | 0 |  | 
| 404 | 0 |         self.len = at; | 
| 405 | 0 |  | 
| 406 | 0 |         unsafe { ret.inc_start(at) }; | 
| 407 | 0 |  | 
| 408 | 0 |         ret | 
| 409 | 0 |     } | 
| 410 |  |  | 
| 411 |  |     /// Splits the bytes into two at the given index. | 
| 412 |  |     /// | 
| 413 |  |     /// Afterwards `self` contains elements `[at, len)`, and the returned | 
| 414 |  |     /// `Bytes` contains elements `[0, at)`. | 
| 415 |  |     /// | 
| 416 |  |     /// This is an `O(1)` operation that just increases the reference count and | 
| 417 |  |     /// sets a few indices. | 
| 418 |  |     /// | 
| 419 |  |     /// # Examples | 
| 420 |  |     /// | 
| 421 |  |     /// ``` | 
| 422 |  |     /// use bytes::Bytes; | 
| 423 |  |     /// | 
| 424 |  |     /// let mut a = Bytes::from(&b"hello world"[..]); | 
| 425 |  |     /// let b = a.split_to(5); | 
| 426 |  |     /// | 
| 427 |  |     /// assert_eq!(&a[..], b" world"); | 
| 428 |  |     /// assert_eq!(&b[..], b"hello"); | 
| 429 |  |     /// ``` | 
| 430 |  |     /// | 
| 431 |  |     /// # Panics | 
| 432 |  |     /// | 
| 433 |  |     /// Panics if `at > len`. | 
| 434 |  |     #[must_use = "consider Bytes::advance if you don't need the other half"] | 
| 435 | 0 |     pub fn split_to(&mut self, at: usize) -> Self { | 
| 436 | 0 |         assert!( | 
| 437 | 0 |             at <= self.len(), | 
| 438 | 0 |             "split_to out of bounds: {:?} <= {:?}", | 
| 439 | 0 |             at, | 
| 440 | 0 |             self.len(), | 
| 441 |  |         ); | 
| 442 |  |  | 
| 443 | 0 |         if at == self.len() { | 
| 444 | 0 |             return mem::replace(self, Bytes::new()); | 
| 445 | 0 |         } | 
| 446 | 0 |  | 
| 447 | 0 |         if at == 0 { | 
| 448 | 0 |             return Bytes::new(); | 
| 449 | 0 |         } | 
| 450 | 0 |  | 
| 451 | 0 |         let mut ret = self.clone(); | 
| 452 | 0 |  | 
| 453 | 0 |         unsafe { self.inc_start(at) }; | 
| 454 | 0 |  | 
| 455 | 0 |         ret.len = at; | 
| 456 | 0 |         ret | 
| 457 | 0 |     } | 
| 458 |  |  | 
| 459 |  |     /// Shortens the buffer, keeping the first `len` bytes and dropping the | 
| 460 |  |     /// rest. | 
| 461 |  |     /// | 
| 462 |  |     /// If `len` is greater than the buffer's current length, this has no | 
| 463 |  |     /// effect. | 
| 464 |  |     /// | 
| 465 |  |     /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the | 
| 466 |  |     /// excess bytes to be returned instead of dropped. | 
| 467 |  |     /// | 
| 468 |  |     /// # Examples | 
| 469 |  |     /// | 
| 470 |  |     /// ``` | 
| 471 |  |     /// use bytes::Bytes; | 
| 472 |  |     /// | 
| 473 |  |     /// let mut buf = Bytes::from(&b"hello world"[..]); | 
| 474 |  |     /// buf.truncate(5); | 
| 475 |  |     /// assert_eq!(buf, b"hello"[..]); | 
| 476 |  |     /// ``` | 
| 477 |  |     #[inline] | 
| 478 | 0 |     pub fn truncate(&mut self, len: usize) { | 
| 479 | 0 |         if len < self.len { | 
| 480 |  |             // The Vec "promotable" vtables do not store the capacity, | 
| 481 |  |             // so we cannot truncate while using this repr. We *have* to | 
| 482 |  |             // promote using `split_off` so the capacity can be stored. | 
| 483 | 0 |             if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE | 
| 484 | 0 |                 || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE | 
| 485 | 0 |             { | 
| 486 | 0 |                 drop(self.split_off(len)); | 
| 487 | 0 |             } else { | 
| 488 | 0 |                 self.len = len; | 
| 489 | 0 |             } | 
| 490 | 0 |         } | 
| 491 | 0 |     } | 
| 492 |  |  | 
| 493 |  |     /// Clears the buffer, removing all data. | 
| 494 |  |     /// | 
| 495 |  |     /// # Examples | 
| 496 |  |     /// | 
| 497 |  |     /// ``` | 
| 498 |  |     /// use bytes::Bytes; | 
| 499 |  |     /// | 
| 500 |  |     /// let mut buf = Bytes::from(&b"hello world"[..]); | 
| 501 |  |     /// buf.clear(); | 
| 502 |  |     /// assert!(buf.is_empty()); | 
| 503 |  |     /// ``` | 
| 504 |  |     #[inline] | 
| 505 | 0 |     pub fn clear(&mut self) { | 
| 506 | 0 |         self.truncate(0); | 
| 507 | 0 |     } | 
| 508 |  |  | 
| 509 |  |     #[inline] | 
| 510 | 0 |     pub(crate) unsafe fn with_vtable( | 
| 511 | 0 |         ptr: *const u8, | 
| 512 | 0 |         len: usize, | 
| 513 | 0 |         data: AtomicPtr<()>, | 
| 514 | 0 |         vtable: &'static Vtable, | 
| 515 | 0 |     ) -> Bytes { | 
| 516 | 0 |         Bytes { | 
| 517 | 0 |             ptr, | 
| 518 | 0 |             len, | 
| 519 | 0 |             data, | 
| 520 | 0 |             vtable, | 
| 521 | 0 |         } | 
| 522 | 0 |     } | 
| 523 |  |  | 
| 524 |  |     // private | 
| 525 |  |  | 
| 526 |  |     #[inline] | 
| 527 | 30.0k |     fn as_slice(&self) -> &[u8] { | 
| 528 | 30.0k |         unsafe { slice::from_raw_parts(self.ptr, self.len) } | 
| 529 | 30.0k |     } | 
| 530 |  |  | 
| 531 |  |     #[inline] | 
| 532 | 0 |     unsafe fn inc_start(&mut self, by: usize) { | 
| 533 | 0 |         // should already be asserted, but debug assert for tests | 
| 534 | 0 |         debug_assert!(self.len >= by, "internal: inc_start out of bounds"); | 
| 535 | 0 |         self.len -= by; | 
| 536 | 0 |         self.ptr = self.ptr.add(by); | 
| 537 | 0 |     } | 
| 538 |  | } | 
| 539 |  |  | 
| 540 |  | // Vtable must enforce this behavior | 
| 541 |  | unsafe impl Send for Bytes {} | 
| 542 |  | unsafe impl Sync for Bytes {} | 
| 543 |  |  | 
| 544 |  | impl Drop for Bytes { | 
| 545 |  |     #[inline] | 
| 546 | 30.0k |     fn drop(&mut self) { | 
| 547 | 30.0k |         unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) } | 
| 548 | 30.0k |     } | 
| 549 |  | } | 
| 550 |  |  | 
| 551 |  | impl Clone for Bytes { | 
| 552 |  |     #[inline] | 
| 553 | 0 |     fn clone(&self) -> Bytes { | 
| 554 | 0 |         unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } | 
| 555 | 0 |     } | 
| 556 |  | } | 
| 557 |  |  | 
| 558 |  | impl Buf for Bytes { | 
| 559 |  |     #[inline] | 
| 560 | 0 |     fn remaining(&self) -> usize { | 
| 561 | 0 |         self.len() | 
| 562 | 0 |     } | 
| 563 |  |  | 
| 564 |  |     #[inline] | 
| 565 | 0 |     fn chunk(&self) -> &[u8] { | 
| 566 | 0 |         self.as_slice() | 
| 567 | 0 |     } | 
| 568 |  |  | 
| 569 |  |     #[inline] | 
| 570 | 0 |     fn advance(&mut self, cnt: usize) { | 
| 571 | 0 |         assert!( | 
| 572 | 0 |             cnt <= self.len(), | 
| 573 | 0 |             "cannot advance past `remaining`: {:?} <= {:?}", | 
| 574 | 0 |             cnt, | 
| 575 | 0 |             self.len(), | 
| 576 |  |         ); | 
| 577 |  |  | 
| 578 | 0 |         unsafe { | 
| 579 | 0 |             self.inc_start(cnt); | 
| 580 | 0 |         } | 
| 581 | 0 |     } | 
| 582 |  |  | 
| 583 | 0 |     fn copy_to_bytes(&mut self, len: usize) -> Self { | 
| 584 | 0 |         if len == self.remaining() { | 
| 585 | 0 |             core::mem::replace(self, Bytes::new()) | 
| 586 |  |         } else { | 
| 587 | 0 |             let ret = self.slice(..len); | 
| 588 | 0 |             self.advance(len); | 
| 589 | 0 |             ret | 
| 590 |  |         } | 
| 591 | 0 |     } | 
| 592 |  | } | 
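
To see the `Buf` implementation above in use, here is a short cursor-style read over a `Bytes` (public API only; `copy_to_bytes` takes the O(1) slice-and-advance path shown above rather than copying):

```rust
use bytes::{Buf, Bytes};

fn main() {
    let mut buf = Bytes::from(&b"hello world"[..]);

    // O(1): slices the front off and advances the cursor.
    let hello = buf.copy_to_bytes(5);
    assert_eq!(&hello[..], b"hello");

    buf.advance(1); // skip the space
    assert_eq!(buf.chunk(), b"world");
    assert_eq!(buf.remaining(), 5);
}
```
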
| 593 |  |  | 
| 594 |  | impl Deref for Bytes { | 
| 595 |  |     type Target = [u8]; | 
| 596 |  |  | 
| 597 |  |     #[inline] | 
| 598 | 0 |     fn deref(&self) -> &[u8] { | 
| 599 | 0 |         self.as_slice() | 
| 600 | 0 |     } | 
| 601 |  | } | 
| 602 |  |  | 
| 603 |  | impl AsRef<[u8]> for Bytes { | 
| 604 |  |     #[inline] | 
| 605 | 0 |     fn as_ref(&self) -> &[u8] { | 
| 606 | 0 |         self.as_slice() | 
| 607 | 0 |     } | 
| 608 |  | } | 
| 609 |  |  | 
| 610 |  | impl hash::Hash for Bytes { | 
| 611 | 0 |     fn hash<H>(&self, state: &mut H) | 
| 612 | 0 |     where | 
| 613 | 0 |         H: hash::Hasher, | 
| 614 | 0 |     { | 
| 615 | 0 |         self.as_slice().hash(state); | 
| 616 | 0 |     } | 
| 617 |  | } | 
| 618 |  |  | 
| 619 |  | impl Borrow<[u8]> for Bytes { | 
| 620 | 0 |     fn borrow(&self) -> &[u8] { | 
| 621 | 0 |         self.as_slice() | 
| 622 | 0 |     } | 
| 623 |  | } | 
| 624 |  |  | 
| 625 |  | impl IntoIterator for Bytes { | 
| 626 |  |     type Item = u8; | 
| 627 |  |     type IntoIter = IntoIter<Bytes>; | 
| 628 |  |  | 
| 629 | 0 |     fn into_iter(self) -> Self::IntoIter { | 
| 630 | 0 |         IntoIter::new(self) | 
| 631 | 0 |     } | 
| 632 |  | } | 
| 633 |  |  | 
| 634 |  | impl<'a> IntoIterator for &'a Bytes { | 
| 635 |  |     type Item = &'a u8; | 
| 636 |  |     type IntoIter = core::slice::Iter<'a, u8>; | 
| 637 |  |  | 
| 638 | 0 |     fn into_iter(self) -> Self::IntoIter { | 
| 639 | 0 |         self.as_slice().iter() | 
| 640 | 0 |     } | 
| 641 |  | } | 
| 642 |  |  | 
| 643 |  | impl FromIterator<u8> for Bytes { | 
| 644 | 0 |     fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { | 
| 645 | 0 |         Vec::from_iter(into_iter).into() | 
| 646 | 0 |     } | 
| 647 |  | } | 
| 648 |  |  | 
| 649 |  | // impl Eq | 
| 650 |  |  | 
| 651 |  | impl PartialEq for Bytes { | 
| 652 | 15.0k |     fn eq(&self, other: &Bytes) -> bool { | 
| 653 | 15.0k |         self.as_slice() == other.as_slice() | 
| 654 | 15.0k |     } | 
| 655 |  | } | 
| 656 |  |  | 
| 657 |  | impl PartialOrd for Bytes { | 
| 658 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 659 | 0 |         self.as_slice().partial_cmp(other.as_slice()) | 
| 660 | 0 |     } | 
| 661 |  | } | 
| 662 |  |  | 
| 663 |  | impl Ord for Bytes { | 
| 664 | 0 |     fn cmp(&self, other: &Bytes) -> cmp::Ordering { | 
| 665 | 0 |         self.as_slice().cmp(other.as_slice()) | 
| 666 | 0 |     } | 
| 667 |  | } | 
| 668 |  |  | 
| 669 |  | impl Eq for Bytes {} | 
| 670 |  |  | 
| 671 |  | impl PartialEq<[u8]> for Bytes { | 
| 672 | 0 |     fn eq(&self, other: &[u8]) -> bool { | 
| 673 | 0 |         self.as_slice() == other | 
| 674 | 0 |     } | 
| 675 |  | } | 
| 676 |  |  | 
| 677 |  | impl PartialOrd<[u8]> for Bytes { | 
| 678 | 0 |     fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { | 
| 679 | 0 |         self.as_slice().partial_cmp(other) | 
| 680 | 0 |     } | 
| 681 |  | } | 
| 682 |  |  | 
| 683 |  | impl PartialEq<Bytes> for [u8] { | 
| 684 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 685 | 0 |         *other == *self | 
| 686 | 0 |     } | 
| 687 |  | } | 
| 688 |  |  | 
| 689 |  | impl PartialOrd<Bytes> for [u8] { | 
| 690 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 691 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) | 
| 692 | 0 |     } | 
| 693 |  | } | 
| 694 |  |  | 
| 695 |  | impl PartialEq<str> for Bytes { | 
| 696 | 0 |     fn eq(&self, other: &str) -> bool { | 
| 697 | 0 |         self.as_slice() == other.as_bytes() | 
| 698 | 0 |     } | 
| 699 |  | } | 
| 700 |  |  | 
| 701 |  | impl PartialOrd<str> for Bytes { | 
| 702 | 0 |     fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { | 
| 703 | 0 |         self.as_slice().partial_cmp(other.as_bytes()) | 
| 704 | 0 |     } | 
| 705 |  | } | 
| 706 |  |  | 
| 707 |  | impl PartialEq<Bytes> for str { | 
| 708 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 709 | 0 |         *other == *self | 
| 710 | 0 |     } | 
| 711 |  | } | 
| 712 |  |  | 
| 713 |  | impl PartialOrd<Bytes> for str { | 
| 714 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 715 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) | 
| 716 | 0 |     } | 
| 717 |  | } | 
| 718 |  |  | 
| 719 |  | impl PartialEq<Vec<u8>> for Bytes { | 
| 720 | 0 |     fn eq(&self, other: &Vec<u8>) -> bool { | 
| 721 | 0 |         *self == other[..] | 
| 722 | 0 |     } | 
| 723 |  | } | 
| 724 |  |  | 
| 725 |  | impl PartialOrd<Vec<u8>> for Bytes { | 
| 726 | 0 |     fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { | 
| 727 | 0 |         self.as_slice().partial_cmp(&other[..]) | 
| 728 | 0 |     } | 
| 729 |  | } | 
| 730 |  |  | 
| 731 |  | impl PartialEq<Bytes> for Vec<u8> { | 
| 732 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 733 | 0 |         *other == *self | 
| 734 | 0 |     } | 
| 735 |  | } | 
| 736 |  |  | 
| 737 |  | impl PartialOrd<Bytes> for Vec<u8> { | 
| 738 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 739 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) | 
| 740 | 0 |     } | 
| 741 |  | } | 
| 742 |  |  | 
| 743 |  | impl PartialEq<String> for Bytes { | 
| 744 | 0 |     fn eq(&self, other: &String) -> bool { | 
| 745 | 0 |         *self == other[..] | 
| 746 | 0 |     } | 
| 747 |  | } | 
| 748 |  |  | 
| 749 |  | impl PartialOrd<String> for Bytes { | 
| 750 | 0 |     fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { | 
| 751 | 0 |         self.as_slice().partial_cmp(other.as_bytes()) | 
| 752 | 0 |     } | 
| 753 |  | } | 
| 754 |  |  | 
| 755 |  | impl PartialEq<Bytes> for String { | 
| 756 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 757 | 0 |         *other == *self | 
| 758 | 0 |     } | 
| 759 |  | } | 
| 760 |  |  | 
| 761 |  | impl PartialOrd<Bytes> for String { | 
| 762 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 763 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) | 
| 764 | 0 |     } | 
| 765 |  | } | 
| 766 |  |  | 
| 767 |  | impl PartialEq<Bytes> for &[u8] { | 
| 768 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 769 | 0 |         *other == *self | 
| 770 | 0 |     } | 
| 771 |  | } | 
| 772 |  |  | 
| 773 |  | impl PartialOrd<Bytes> for &[u8] { | 
| 774 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 775 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) | 
| 776 | 0 |     } | 
| 777 |  | } | 
| 778 |  |  | 
| 779 |  | impl PartialEq<Bytes> for &str { | 
| 780 | 0 |     fn eq(&self, other: &Bytes) -> bool { | 
| 781 | 0 |         *other == *self | 
| 782 | 0 |     } | 
| 783 |  | } | 
| 784 |  |  | 
| 785 |  | impl PartialOrd<Bytes> for &str { | 
| 786 | 0 |     fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { | 
| 787 | 0 |         <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) | 
| 788 | 0 |     } | 
| 789 |  | } | 
| 790 |  |  | 
| 791 |  | impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes | 
| 792 |  | where | 
| 793 |  |     Bytes: PartialEq<T>, | 
| 794 |  | { | 
| 795 | 0 |     fn eq(&self, other: &&'a T) -> bool { | 
| 796 | 0 |         *self == **other | 
| 797 | 0 |     } | 
| 798 |  | } | 
| 799 |  |  | 
| 800 |  | impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes | 
| 801 |  | where | 
| 802 |  |     Bytes: PartialOrd<T>, | 
| 803 |  | { | 
| 804 | 0 |     fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { | 
| 805 | 0 |         self.partial_cmp(&**other) | 
| 806 | 0 |     } | 
| 807 |  | } | 
| 808 |  |  | 
| 809 |  | // impl From | 
| 810 |  |  | 
| 811 |  | impl Default for Bytes { | 
| 812 |  |     #[inline] | 
| 813 | 0 |     fn default() -> Bytes { | 
| 814 | 0 |         Bytes::new() | 
| 815 | 0 |     } | 
| 816 |  | } | 
| 817 |  |  | 
| 818 |  | impl From<&'static [u8]> for Bytes { | 
| 819 | 0 |     fn from(slice: &'static [u8]) -> Bytes { | 
| 820 | 0 |         Bytes::from_static(slice) | 
| 821 | 0 |     } | 
| 822 |  | } | 
| 823 |  |  | 
| 824 |  | impl From<&'static str> for Bytes { | 
| 825 | 0 |     fn from(slice: &'static str) -> Bytes { | 
| 826 | 0 |         Bytes::from_static(slice.as_bytes()) | 
| 827 | 0 |     } | 
| 828 |  | } | 
| 829 |  |  | 
| 830 |  | impl From<Vec<u8>> for Bytes { | 
| 831 | 30.0k |     fn from(mut vec: Vec<u8>) -> Bytes { | 
| 832 | 30.0k |         let ptr = vec.as_mut_ptr(); | 
| 833 | 30.0k |         let len = vec.len(); | 
| 834 | 30.0k |         let cap = vec.capacity(); | 
| 835 | 30.0k |  | 
| 836 | 30.0k |         // Avoid an extra allocation if possible. | 
| 837 | 30.0k |         if len == cap { | 
| 838 | 30.0k |             return Bytes::from(vec.into_boxed_slice()); | 
| 839 | 0 |         } | 
| 840 | 0 | 
 | 
| 841 | 0 |         let shared = Box::new(Shared { | 
| 842 | 0 |             buf: ptr, | 
| 843 | 0 |             cap, | 
| 844 | 0 |             ref_cnt: AtomicUsize::new(1), | 
| 845 | 0 |         }); | 
| 846 | 0 |         mem::forget(vec); | 
| 847 | 0 | 
 | 
| 848 | 0 |         let shared = Box::into_raw(shared); | 
| 849 | 0 |         // The pointer should be aligned, so this assert should | 
| 850 | 0 |         // always succeed. | 
| 851 | 0 |         debug_assert!( | 
| 852 | 0 |             0 == (shared as usize & KIND_MASK), | 
| 853 |  |             "internal: Box<Shared> should have an aligned pointer", | 
| 854 |  |         ); | 
| 855 | 0 |         Bytes { | 
| 856 | 0 |             ptr, | 
| 857 | 0 |             len, | 
| 858 | 0 |             data: AtomicPtr::new(shared as _), | 
| 859 | 0 |             vtable: &SHARED_VTABLE, | 
| 860 | 0 |         } | 
| 861 | 30.0k |     } | 
| 862 |  | } | 
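
Note the fast path above: when `len == cap` the vector is converted through `Box<[u8]>` and no `Shared` header is allocated up front. Either way the conversion takes ownership of the existing buffer without copying, which the following sketch observes through the data pointer:

```rust
use bytes::Bytes;

fn main() {
    let v = vec![0u8; 1024]; // len == cap, so the boxed-slice path is taken
    let ptr = v.as_ptr();

    // Ownership of the buffer moves into `Bytes`; no bytes are copied.
    let b = Bytes::from(v);
    assert_eq!(b.as_ptr(), ptr);
    assert_eq!(b.len(), 1024);
}
```
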
| 863 |  |  | 
| 864 |  | impl From<Box<[u8]>> for Bytes { | 
| 865 | 30.0k |     fn from(slice: Box<[u8]>) -> Bytes { | 
| 866 | 30.0k |         // Box<[u8]> doesn't contain a heap allocation for empty slices, | 
| 867 | 30.0k |         // so the pointer isn't aligned enough for the KIND_VEC stashing to | 
| 868 | 30.0k |         // work. | 
| 869 | 30.0k |         if slice.is_empty() { | 
| 870 | 0 |             return Bytes::new(); | 
| 871 | 30.0k |         } | 
| 872 | 30.0k |  | 
| 873 | 30.0k |         let len = slice.len(); | 
| 874 | 30.0k |         let ptr = Box::into_raw(slice) as *mut u8; | 
| 875 | 30.0k |  | 
| 876 | 30.0k |         if ptr as usize & 0x1 == 0 { | 
| 877 | 30.0k |             let data = ptr_map(ptr, |addr| addr | KIND_VEC); | 
| 878 | 30.0k |             Bytes { | 
| 879 | 30.0k |                 ptr, | 
| 880 | 30.0k |                 len, | 
| 881 | 30.0k |                 data: AtomicPtr::new(data.cast()), | 
| 882 | 30.0k |                 vtable: &PROMOTABLE_EVEN_VTABLE, | 
| 883 | 30.0k |             } | 
| 884 |  |         } else { | 
| 885 | 0 |             Bytes { | 
| 886 | 0 |                 ptr, | 
| 887 | 0 |                 len, | 
| 888 | 0 |                 data: AtomicPtr::new(ptr.cast()), | 
| 889 | 0 |                 vtable: &PROMOTABLE_ODD_VTABLE, | 
| 890 | 0 |             } | 
| 891 |  |         } | 
| 892 | 30.0k |     } | 
| 893 |  | } | 
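
The even/odd branching above exists because a one-bit kind flag gets stashed in the pointer's least-significant bit, which only works when the allocation address is even. A standalone sketch of that LSB-tagging trick (hypothetical helpers, not the crate's API):

```rust
const TAG_MASK: usize = 0b1;

// Stash a one-bit tag in an even (2-byte aligned) pointer.
fn tag(ptr: *mut u8, kind: usize) -> *mut u8 {
    debug_assert_eq!(ptr as usize & TAG_MASK, 0, "pointer must be even");
    (ptr as usize | kind) as *mut u8
}

// Recover the original pointer and the tag bit.
fn untag(tagged: *mut u8) -> (*mut u8, usize) {
    (
        (tagged as usize & !TAG_MASK) as *mut u8,
        tagged as usize & TAG_MASK,
    )
}

fn main() {
    let mut buf = [0u16; 4]; // u16 storage guarantees an even address
    let p = buf.as_mut_ptr() as *mut u8;

    let tagged = tag(p, 1);
    let (orig, kind) = untag(tagged);
    assert_eq!(orig, p);
    assert_eq!(kind, 1);
}
```
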
| 894 |  |  | 
| 895 |  | impl From<String> for Bytes { | 
| 896 | 0 |     fn from(s: String) -> Bytes { | 
| 897 | 0 |         Bytes::from(s.into_bytes()) | 
| 898 | 0 |     } | 
| 899 |  | } | 
| 900 |  |  | 
| 901 |  | impl From<Bytes> for Vec<u8> { | 
| 902 | 0 |     fn from(bytes: Bytes) -> Vec<u8> { | 
| 903 | 0 |         let bytes = mem::ManuallyDrop::new(bytes); | 
| 904 | 0 |         unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } | 
| 905 | 0 |     } | 
| 906 |  | } | 
| 907 |  |  | 
| 908 |  | // ===== impl Vtable ===== | 
| 909 |  |  | 
| 910 |  | impl fmt::Debug for Vtable { | 
| 911 | 0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | 
| 912 | 0 |         f.debug_struct("Vtable") | 
| 913 | 0 |             .field("clone", &(self.clone as *const ())) | 
| 914 | 0 |             .field("drop", &(self.drop as *const ())) | 
| 915 | 0 |             .finish() | 
| 916 | 0 |     } | 
| 917 |  | } | 
| 918 |  |  | 
| 919 |  | // ===== impl StaticVtable ===== | 
| 920 |  |  | 
| 921 |  | const STATIC_VTABLE: Vtable = Vtable { | 
| 922 |  |     clone: static_clone, | 
| 923 |  |     to_vec: static_to_vec, | 
| 924 |  |     is_unique: static_is_unique, | 
| 925 |  |     drop: static_drop, | 
| 926 |  | }; | 
| 927 |  |  | 
| 928 | 0 | unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { | 
| 929 | 0 |     let slice = slice::from_raw_parts(ptr, len); | 
| 930 | 0 |     Bytes::from_static(slice) | 
| 931 | 0 | } | 
| 932 |  |  | 
| 933 | 0 | unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { | 
| 934 | 0 |     let slice = slice::from_raw_parts(ptr, len); | 
| 935 | 0 |     slice.to_vec() | 
| 936 | 0 | } | 
| 937 |  |  | 
| 938 | 0 | fn static_is_unique(_: &AtomicPtr<()>) -> bool { | 
| 939 | 0 |     false | 
| 940 | 0 | } | 
| 941 |  |  | 
| 942 | 0 | unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { | 
| 943 | 0 |     // nothing to drop for &'static [u8] | 
| 944 | 0 | } | 
| 945 |  |  | 
| 946 |  | // ===== impl PromotableVtable ===== | 
| 947 |  |  | 
| 948 |  | static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { | 
| 949 |  |     clone: promotable_even_clone, | 
| 950 |  |     to_vec: promotable_even_to_vec, | 
| 951 |  |     is_unique: promotable_is_unique, | 
| 952 |  |     drop: promotable_even_drop, | 
| 953 |  | }; | 
| 954 |  |  | 
| 955 |  | static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { | 
| 956 |  |     clone: promotable_odd_clone, | 
| 957 |  |     to_vec: promotable_odd_to_vec, | 
| 958 |  |     is_unique: promotable_is_unique, | 
| 959 |  |     drop: promotable_odd_drop, | 
| 960 |  | }; | 
| 961 |  |  | 
| 962 | 0 | unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { | 
| 963 | 0 |     let shared = data.load(Ordering::Acquire); | 
| 964 | 0 |     let kind = shared as usize & KIND_MASK; | 
| 965 | 0 | 
 | 
| 966 | 0 |     if kind == KIND_ARC { | 
| 967 | 0 |         shallow_clone_arc(shared.cast(), ptr, len) | 
| 968 |  |     } else { | 
| 969 | 0 |         debug_assert_eq!(kind, KIND_VEC); | 
| 970 | 0 |         let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); | 
| 971 | 0 |         shallow_clone_vec(data, shared, buf, ptr, len) | 
| 972 |  |     } | 
| 973 | 0 | } | 
| 974 |  |  | 
| 975 | 0 | unsafe fn promotable_to_vec( | 
| 976 | 0 |     data: &AtomicPtr<()>, | 
| 977 | 0 |     ptr: *const u8, | 
| 978 | 0 |     len: usize, | 
| 979 | 0 |     f: fn(*mut ()) -> *mut u8, | 
| 980 | 0 | ) -> Vec<u8> { | 
| 981 | 0 |     let shared = data.load(Ordering::Acquire); | 
| 982 | 0 |     let kind = shared as usize & KIND_MASK; | 
| 983 | 0 |  | 
| 984 | 0 |     if kind == KIND_ARC { | 
| 985 | 0 |         shared_to_vec_impl(shared.cast(), ptr, len) | 
| 986 |  |     } else { | 
| 987 |  |         // If Bytes holds a Vec, then the offset must be 0. | 
| 988 | 0 |         debug_assert_eq!(kind, KIND_VEC); | 
| 989 |  |  | 
| 990 | 0 |         let buf = f(shared); | 
| 991 | 0 |  | 
| 992 | 0 |         let cap = (ptr as usize - buf as usize) + len; | 
| 993 | 0 |  | 
| 994 | 0 |         // Copy back buffer | 
| 995 | 0 |         ptr::copy(ptr, buf, len); | 
| 996 | 0 |  | 
| 997 | 0 |         Vec::from_raw_parts(buf, len, cap) | 
| 998 |  |     } | 
| 999 | 0 | } | 
| 1000 |  |  | 
| 1001 | 0 | unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { | 
| 1002 | 0 |     promotable_to_vec(data, ptr, len, |shared| { | 
| 1003 | 0 |         ptr_map(shared.cast(), |addr| addr & !KIND_MASK) | 
| 1004 | 0 |     }) | 
| 1005 | 0 | } | 
| 1006 |  |  | 
| 1007 | 30.0k | unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { | 
| 1008 | 30.0k |     data.with_mut(|shared| { | 
| 1009 | 30.0k |         let shared = *shared; | 
| 1010 | 30.0k |         let kind = shared as usize & KIND_MASK; | 
| 1011 | 30.0k |  | 
| 1012 | 30.0k |         if kind == KIND_ARC { | 
| 1013 | 0 |             release_shared(shared.cast()); | 
| 1014 | 0 |         } else { | 
| 1015 | 30.0k |             debug_assert_eq!(kind, KIND_VEC); | 
| 1016 | 30.0k |             let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); | 
| 1017 | 30.0k |             free_boxed_slice(buf, ptr, len); | 
| 1018 |  |         } | 
| 1019 | 30.0k |     }); | 
| 1020 | 30.0k | } | 
| 1021 |  |  | 
| 1022 | 0 | unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { | 
| 1023 | 0 |     let shared = data.load(Ordering::Acquire); | 
| 1024 | 0 |     let kind = shared as usize & KIND_MASK; | 
| 1025 | 0 |  | 
| 1026 | 0 |     if kind == KIND_ARC { | 
| 1027 | 0 |         shallow_clone_arc(shared as _, ptr, len) | 
| 1028 |  |     } else { | 
| 1029 | 0 |         debug_assert_eq!(kind, KIND_VEC); | 
| 1030 | 0 |         shallow_clone_vec(data, shared, shared.cast(), ptr, len) | 
| 1031 |  |     } | 
| 1032 | 0 | } | 
| 1033 |  |  | 
| 1034 | 0 | unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { | 
| 1035 | 0 |     promotable_to_vec(data, ptr, len, |shared| shared.cast()) | 
| 1036 | 0 | } | 
| 1037 |  |  | 
| 1038 | 0 | unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { | 
| 1039 | 0 |     data.with_mut(|shared| { | 
| 1040 | 0 |         let shared = *shared; | 
| 1041 | 0 |         let kind = shared as usize & KIND_MASK; | 
| 1042 | 0 |  | 
| 1043 | 0 |         if kind == KIND_ARC { | 
| 1044 | 0 |             release_shared(shared.cast()); | 
| 1045 | 0 |         } else { | 
| 1046 | 0 |             debug_assert_eq!(kind, KIND_VEC); | 
| 1047 |  |  | 
| 1048 | 0 |             free_boxed_slice(shared.cast(), ptr, len); | 
| 1049 |  |         } | 
| 1050 | 0 |     }); | 
| 1051 | 0 | } | 
| 1052 |  |  | 
| 1053 | 0 | unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool { | 
| 1054 | 0 |     let shared = data.load(Ordering::Acquire); | 
| 1055 | 0 |     let kind = shared as usize & KIND_MASK; | 
| 1056 | 0 |  | 
| 1057 | 0 |     if kind == KIND_ARC { | 
| 1058 | 0 |         let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed); | 
| 1059 | 0 |         ref_cnt == 1 | 
| 1060 |  |     } else { | 
| 1061 | 0 |         true | 
| 1062 |  |     } | 
| 1063 | 0 | } | 
| 1064 |  |  | 
| 1065 | 30.0k | unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { | 
| 1066 | 30.0k |     let cap = (offset as usize - buf as usize) + len; | 
| 1067 | 30.0k |     dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) | 
| 1068 | 30.0k | } | 
| 1069 |  |  | 
| 1070 |  | // ===== impl SharedVtable ===== | 
| 1071 |  |  | 
| 1072 |  | struct Shared { | 
| 1073 |  |     // Holds arguments to dealloc upon Drop, but otherwise doesn't use them | 
| 1074 |  |     buf: *mut u8, | 
| 1075 |  |     cap: usize, | 
| 1076 |  |     ref_cnt: AtomicUsize, | 
| 1077 |  | } | 
| 1078 |  |  | 
| 1079 |  | impl Drop for Shared { | 
| 1080 | 0 |     fn drop(&mut self) { | 
| 1081 | 0 |         unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } | 
| 1082 | 0 |     } | 
| 1083 |  | } | 
| 1084 |  |  | 
| 1085 |  | // Assert that the alignment of `Shared` is divisible by 2. | 
| 1086 |  | // This is a necessary invariant, since we depend on the pointer to an | 
| 1087 |  | // allocated `Shared` object implicitly carrying the `KIND_ARC` flag in | 
| 1088 |  | // its low bit: the flag is set when the LSB is 0. | 
| 1089 |  | const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; | 
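
The `const _: [(); ...]` line is a pre-`static_assertions` idiom: the array-length expression underflows `usize` during const evaluation whenever the condition fails, turning a broken invariant into a build error. A minimal sketch of the same trick on an always-true condition:

```rust
// Compile-time assertion by array-length underflow: if the expression
// in `0 - (...)` is nonzero, const evaluation fails and the build errors.
// Here we assert that `u32` is exactly 4 bytes, which always holds.
const _: [(); 0 - ((core::mem::size_of::<u32>() != 4) as usize)] = [];

fn main() {
    // Nothing happens at runtime; the check ran during compilation.
}
```
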
| 1090 |  |  | 
| 1091 |  | static SHARED_VTABLE: Vtable = Vtable { | 
| 1092 |  |     clone: shared_clone, | 
| 1093 |  |     to_vec: shared_to_vec, | 
| 1094 |  |     is_unique: shared_is_unique, | 
| 1095 |  |     drop: shared_drop, | 
| 1096 |  | }; | 
| 1097 |  |  | 
| 1098 |  | const KIND_ARC: usize = 0b0; | 
| 1099 |  | const KIND_VEC: usize = 0b1; | 
| 1100 |  | const KIND_MASK: usize = 0b1; | 
| 1101 |  |  | 
| 1102 | 0 | unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { | 
| 1103 | 0 |     let shared = data.load(Ordering::Relaxed); | 
| 1104 | 0 |     shallow_clone_arc(shared as _, ptr, len) | 
| 1105 | 0 | } | 
| 1106 |  |  | 
| 1107 | 0 | unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> { | 
| 1108 | 0 |     // Check that the ref_cnt is 1 (unique). | 
| 1109 | 0 |     // | 
| 1110 | 0 |     // If it is unique, then it is set to 0 with AcqRel fence for the same | 
| 1111 | 0 |     // reason in release_shared. | 
| 1112 | 0 |     // | 
| 1113 | 0 |     // Otherwise, we take the other branch and call release_shared. | 
| 1114 | 0 |     if (*shared) | 
| 1115 | 0 |         .ref_cnt | 
| 1116 | 0 |         .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) | 
| 1117 | 0 |         .is_ok() | 
| 1118 |  |     { | 
| 1119 | 0 |         let buf = (*shared).buf; | 
| 1120 | 0 |         let cap = (*shared).cap; | 
| 1121 | 0 | 
 | 
| 1122 | 0 |         // Deallocate Shared | 
| 1123 | 0 |         drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>)); | 
| 1124 | 0 |  | 
| 1125 | 0 |         // Copy back buffer | 
| 1126 | 0 |         ptr::copy(ptr, buf, len); | 
| 1127 | 0 |  | 
| 1128 | 0 |         Vec::from_raw_parts(buf, len, cap) | 
| 1129 |  |     } else { | 
| 1130 | 0 |         let v = slice::from_raw_parts(ptr, len).to_vec(); | 
| 1131 | 0 |         release_shared(shared); | 
| 1132 | 0 |         v | 
| 1133 |  |     } | 
| 1134 | 0 | } | 
| 1135 |  |  | 
| 1136 | 0 | unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { | 
| 1137 | 0 |     shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) | 
| 1138 | 0 | } | 
| 1139 |  |  | 
| 1140 | 0 | pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool { | 
| 1141 | 0 |     let shared = data.load(Ordering::Acquire); | 
| 1142 | 0 |     let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed); | 
| 1143 | 0 |     ref_cnt == 1 | 
| 1144 | 0 | } | 
| 1145 |  |  | 
| 1146 | 0 | unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { | 
| 1147 | 0 |     data.with_mut(|shared| { | 
| 1148 | 0 |         release_shared(shared.cast()); | 
| 1149 | 0 |     }); | 
| 1150 | 0 | } | 
| 1151 |  |  | 
| 1152 | 0 | unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { | 
| 1153 | 0 |     let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); | 
| 1154 | 0 |  | 
| 1155 | 0 |     if old_size > usize::MAX >> 1 { | 
| 1156 | 0 |         crate::abort(); | 
| 1157 | 0 |     } | 
| 1158 | 0 |  | 
| 1159 | 0 |     Bytes { | 
| 1160 | 0 |         ptr, | 
| 1161 | 0 |         len, | 
| 1162 | 0 |         data: AtomicPtr::new(shared as _), | 
| 1163 | 0 |         vtable: &SHARED_VTABLE, | 
| 1164 | 0 |     } | 
| 1165 | 0 | } | 
| 1166 |  |  | 
| 1167 |  | #[cold] | 
| 1168 | 0 | unsafe fn shallow_clone_vec( | 
| 1169 | 0 |     atom: &AtomicPtr<()>, | 
| 1170 | 0 |     ptr: *const (), | 
| 1171 | 0 |     buf: *mut u8, | 
| 1172 | 0 |     offset: *const u8, | 
| 1173 | 0 |     len: usize, | 
| 1174 | 0 | ) -> Bytes { | 
| 1175 | 0 |     // If the buffer is still tracked in a `Vec<u8>`, it is time to | 
| 1176 | 0 |     // promote the vec to an `Arc`. This could potentially be called | 
| 1177 | 0 |     // concurrently, so some care must be taken. | 
| 1178 | 0 |  | 
| 1179 | 0 |     // First, allocate a new `Shared` instance containing the | 
| 1180 | 0 |     // `Vec` fields. It's important to note that `ptr`, `len`, | 
| 1181 | 0 |     // and `cap` cannot be mutated without having `&mut self`. | 
| 1182 | 0 |     // This means that these fields will not be concurrently | 
| 1183 | 0 |     // updated and since the buffer hasn't been promoted to an | 
| 1184 | 0 |     // `Arc`, those three fields still are the components of the | 
| 1185 | 0 |     // vector. | 
| 1186 | 0 |     let shared = Box::new(Shared { | 
| 1187 | 0 |         buf, | 
| 1188 | 0 |         cap: (offset as usize - buf as usize) + len, | 
| 1189 | 0 |         // Initialize refcount to 2. One for this reference, and one | 
| 1190 | 0 |         // for the new clone that will be returned from | 
| 1191 | 0 |         // `shallow_clone`. | 
| 1192 | 0 |         ref_cnt: AtomicUsize::new(2), | 
| 1193 | 0 |     }); | 
| 1194 | 0 |  | 
| 1195 | 0 |     let shared = Box::into_raw(shared); | 
| 1196 | 0 |  | 
| 1197 | 0 |     // The pointer should be aligned, so this assert should | 
| 1198 | 0 |     // always succeed. | 
| 1199 | 0 |     debug_assert!( | 
| 1200 | 0 |         0 == (shared as usize & KIND_MASK), | 
| 1201 |  |         "internal: Box<Shared> should have an aligned pointer", | 
| 1202 |  |     ); | 
| 1203 |  |  | 
| 1204 |  |     // Try compare & swapping the pointer into the `arc` field. | 
| 1205 |  |     // `Release` is used to synchronize with other threads that | 
| 1206 |  |     // will load the `arc` field. | 
| 1207 |  |     // | 
| 1208 |  |     // If the `compare_exchange` fails, then the thread lost the | 
| 1209 |  |     // race to promote the buffer to shared. The `Acquire` | 
| 1210 |  |     // ordering will synchronize with the `compare_exchange` | 
| 1211 |  |     // that happened in the other thread and the `Shared` | 
| 1212 |  |     // pointed to by `actual` will be visible. | 
| 1213 | 0 |     match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { | 
| 1214 | 0 |         Ok(actual) => { | 
| 1215 | 0 |             debug_assert!(actual as usize == ptr as usize); | 
| 1216 |  |             // The upgrade was successful, the new handle can be | 
| 1217 |  |             // returned. | 
| 1218 | 0 |             Bytes { | 
| 1219 | 0 |                 ptr: offset, | 
| 1220 | 0 |                 len, | 
| 1221 | 0 |                 data: AtomicPtr::new(shared as _), | 
| 1222 | 0 |                 vtable: &SHARED_VTABLE, | 
| 1223 | 0 |             } | 
| 1224 |  |         } | 
| 1225 | 0 |         Err(actual) => { | 
| 1226 | 0 |             // The upgrade failed, a concurrent clone happened. Release | 
| 1227 | 0 |             // the allocation that was made in this thread, it will not | 
| 1228 | 0 |             // be needed. | 
| 1229 | 0 |             let shared = Box::from_raw(shared); | 
| 1230 | 0 |             mem::forget(*shared); | 
| 1231 | 0 |  | 
| 1232 | 0 |             // Buffer already promoted to shared storage, so increment ref | 
| 1233 | 0 |             // count. | 
| 1234 | 0 |             shallow_clone_arc(actual as _, offset, len) | 
| 1235 |  |         } | 
| 1236 |  |     } | 
| 1237 | 0 | } | 
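
The race handling above is a general CAS pattern: build the shared state speculatively, try to install it with `compare_exchange`, and on failure free the speculative allocation and adopt the winner's. A condensed standalone sketch of that pattern (a hypothetical one-shot initializer, not the crate's internals):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// Every caller may build a candidate, but exactly one is installed;
// losers free their allocation and use the winner's, mirroring the
// Err(actual) branch in shallow_clone_vec above.
fn get_or_init(slot: &AtomicPtr<u64>, value: u64) -> *mut u64 {
    let current = slot.load(Ordering::Acquire);
    if !current.is_null() {
        return current;
    }

    let candidate = Box::into_raw(Box::new(value));
    match slot.compare_exchange(
        ptr::null_mut(),
        candidate,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        Ok(_) => candidate, // we won; our allocation is now shared
        Err(winner) => {
            // Lost the race: release the speculative allocation.
            drop(unsafe { Box::from_raw(candidate) });
            winner
        }
    }
}

fn main() {
    let slot = AtomicPtr::new(ptr::null_mut());
    let p = get_or_init(&slot, 7);
    assert_eq!(unsafe { *p }, 7);
    drop(unsafe { Box::from_raw(p) }); // clean up for the example
}
```
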
| 1238 |  |  | 
| 1239 | 0 | unsafe fn release_shared(ptr: *mut Shared) { | 
| 1240 | 0 |     // `Shared` storage... follow the drop steps from Arc. | 
| 1241 | 0 |     if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { | 
| 1242 | 0 |         return; | 
| 1243 | 0 |     } | 
| 1244 | 0 |  | 
| 1245 | 0 |     // This fence is needed to prevent reordering of use of the data and | 
| 1246 | 0 |     // deletion of the data.  Because it is marked `Release`, the decreasing | 
| 1247 | 0 |     // of the reference count synchronizes with this `Acquire` fence. This | 
| 1248 | 0 |     // means that use of the data happens before decreasing the reference | 
| 1249 | 0 |     // count, which happens before this fence, which happens before the | 
| 1250 | 0 |     // deletion of the data. | 
| 1251 | 0 |     // | 
| 1252 | 0 |     // As explained in the [Boost documentation][1], | 
| 1253 | 0 |     // | 
| 1254 | 0 |     // > It is important to enforce any possible access to the object in one | 
| 1255 | 0 |     // > thread (through an existing reference) to *happen before* deleting | 
| 1256 | 0 |     // > the object in a different thread. This is achieved by a "release" | 
| 1257 | 0 |     // > operation after dropping a reference (any access to the object | 
| 1258 | 0 |     // > through this reference must obviously have happened before), and an | 
| 1259 | 0 |     // > "acquire" operation before deleting the object. | 
| 1260 | 0 |     // | 
| 1261 | 0 |     // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) | 
| 1262 | 0 |     // | 
| 1263 | 0 |     // Thread sanitizer does not support atomic fences. Use an atomic load | 
| 1264 | 0 |     // instead. | 
| 1265 | 0 |     (*ptr).ref_cnt.load(Ordering::Acquire); | 
| 1266 | 0 |  | 
| 1267 | 0 |     // Drop the data | 
| 1268 | 0 |     drop(Box::from_raw(ptr)); | 
| 1269 | 0 | } | 
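
The decrement-then-fence shape above is the standard `Arc` drop protocol; a minimal sketch of the same protocol on a standalone refcounted box (with an `Acquire` load in place of an acquire fence, for the thread-sanitizer reason noted above):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct RefCounted {
    ref_cnt: AtomicUsize,
    payload: Vec<u8>,
}

// Safety: `ptr` must come from Box::into_raw, and `ref_cnt` must track
// the number of outstanding owners, each of which calls this once.
unsafe fn release(ptr: *mut RefCounted) {
    // `Release` publishes all prior uses of the data to whichever
    // owner ends up freeing it.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }
    // Acquire-load instead of an acquire fence (tsan-friendly),
    // synchronizing with every earlier decrement before freeing.
    (*ptr).ref_cnt.load(Ordering::Acquire);
    drop(Box::from_raw(ptr));
}

fn main() {
    let obj = Box::into_raw(Box::new(RefCounted {
        ref_cnt: AtomicUsize::new(2),
        payload: b"shared".to_vec(),
    }));
    unsafe {
        assert_eq!(&(*obj).payload[..], b"shared");
        release(obj); // 2 -> 1: not freed yet
        release(obj); // 1 -> 0: frees the allocation
    }
}
```
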
| 1270 |  |  | 
| 1271 |  | // Ideally we would always use this version of `ptr_map` since it is strict | 
| 1272 |  | // provenance compatible, but it results in worse codegen. We will however still | 
| 1273 |  | // use it on miri because it gives better diagnostics for people who test bytes | 
| 1274 |  | // code with miri. | 
| 1275 |  | // | 
| 1276 |  | // See https://github.com/tokio-rs/bytes/pull/545 for more info. | 
| 1277 |  | #[cfg(miri)] | 
| 1278 |  | fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 | 
| 1279 |  | where | 
| 1280 |  |     F: FnOnce(usize) -> usize, | 
| 1281 |  | { | 
| 1282 |  |     let old_addr = ptr as usize; | 
| 1283 |  |     let new_addr = f(old_addr); | 
| 1284 |  |     let diff = new_addr.wrapping_sub(old_addr); | 
| 1285 |  |     ptr.wrapping_add(diff) | 
| 1286 |  | } | 
| 1287 |  |  | 
| 1288 |  | #[cfg(not(miri))] | 
| 1289 | 60.0k | fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 | 
| 1290 | 60.0k | where | 
| 1291 | 60.0k |     F: FnOnce(usize) -> usize, | 
| 1292 | 60.0k | { | 
| 1293 | 60.0k |     let old_addr = ptr as usize; | 
| 1294 | 60.0k |     let new_addr = f(old_addr); | 
| 1295 | 60.0k |     new_addr as *mut u8 | 
| 1296 | 60.0k | } | 
| 1297 |  |  | 
| 1298 |  | // compile-fails | 
| 1299 |  |  | 
| 1300 |  | /// ```compile_fail | 
| 1301 |  | /// use bytes::Bytes; | 
| 1302 |  | /// #[deny(unused_must_use)] | 
| 1303 |  | /// { | 
| 1304 |  | ///     let mut b1 = Bytes::from("hello world"); | 
| 1305 |  | ///     b1.split_to(6); | 
| 1306 |  | /// } | 
| 1307 |  | /// ``` | 
| 1308 | 0 | fn _split_to_must_use() {} | 
| 1309 |  |  | 
| 1310 |  | /// ```compile_fail | 
| 1311 |  | /// use bytes::Bytes; | 
| 1312 |  | /// #[deny(unused_must_use)] | 
| 1313 |  | /// { | 
| 1314 |  | ///     let mut b1 = Bytes::from("hello world"); | 
| 1315 |  | ///     b1.split_off(6); | 
| 1316 |  | /// } | 
| 1317 |  | /// ``` | 
| 1318 | 0 | fn _split_off_must_use() {} | 
| 1319 |  |  | 
| 1320 |  | // fuzz tests | 
| 1321 |  | #[cfg(all(test, loom))] | 
| 1322 |  | mod fuzz { | 
| 1323 |  |     use loom::sync::Arc; | 
| 1324 |  |     use loom::thread; | 
| 1325 |  |  | 
| 1326 |  |     use super::Bytes; | 
| 1327 |  |     #[test] | 
| 1328 |  |     fn bytes_cloning_vec() { | 
| 1329 |  |         loom::model(|| { | 
| 1330 |  |             let a = Bytes::from(b"abcdefgh".to_vec()); | 
| 1331 |  |             let addr = a.as_ptr() as usize; | 
| 1332 |  |  | 
| 1333 |  |             // test the Bytes::clone is Sync by putting it in an Arc | 
| 1334 |  |             let a1 = Arc::new(a); | 
| 1335 |  |             let a2 = a1.clone(); | 
| 1336 |  |  | 
| 1337 |  |             let t1 = thread::spawn(move || { | 
| 1338 |  |                 let b: Bytes = (*a1).clone(); | 
| 1339 |  |                 assert_eq!(b.as_ptr() as usize, addr); | 
| 1340 |  |             }); | 
| 1341 |  |  | 
| 1342 |  |             let t2 = thread::spawn(move || { | 
| 1343 |  |                 let b: Bytes = (*a2).clone(); | 
| 1344 |  |                 assert_eq!(b.as_ptr() as usize, addr); | 
| 1345 |  |             }); | 
| 1346 |  |  | 
| 1347 |  |             t1.join().unwrap(); | 
| 1348 |  |             t2.join().unwrap(); | 
| 1349 |  |         }); | 
| 1350 |  |     } | 
| 1351 |  | } |