Coverage Report

Created: 2025-12-31 06:10

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/bytes-1.11.0/src/bytes.rs
Line
Count
Source
1
use core::mem::{self, ManuallyDrop};
2
use core::ops::{Deref, RangeBounds};
3
use core::ptr::NonNull;
4
use core::{cmp, fmt, hash, ptr, slice};
5
6
use alloc::{
7
    alloc::{dealloc, Layout},
8
    borrow::Borrow,
9
    boxed::Box,
10
    string::String,
11
    vec::Vec,
12
};
13
14
use crate::buf::IntoIter;
15
#[allow(unused)]
16
use crate::loom::sync::atomic::AtomicMut;
17
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
18
use crate::{Buf, BytesMut};
19
20
/// A cheaply cloneable and sliceable chunk of contiguous memory.
21
///
22
/// `Bytes` is an efficient container for storing and operating on contiguous
23
/// slices of memory. It is intended for use primarily in networking code, but
24
/// could have applications elsewhere as well.
25
///
26
/// `Bytes` values facilitate zero-copy network programming by allowing multiple
27
/// `Bytes` objects to point to the same underlying memory.
28
///
29
/// `Bytes` does not have a single implementation. It is an interface, whose
30
/// exact behavior is implemented through dynamic dispatch in several underlying
31
/// implementations of `Bytes`.
32
///
33
/// All `Bytes` implementations must fulfill the following requirements:
34
/// - They are cheaply cloneable and thereby shareable between an unlimited amount
35
///   of components, for example by modifying a reference count.
36
/// - Instances can be sliced to refer to a subset of the original buffer.
37
///
38
/// ```
39
/// use bytes::Bytes;
40
///
41
/// let mut mem = Bytes::from("Hello world");
42
/// let a = mem.slice(0..5);
43
///
44
/// assert_eq!(a, "Hello");
45
///
46
/// let b = mem.split_to(6);
47
///
48
/// assert_eq!(mem, "world");
49
/// assert_eq!(b, "Hello ");
50
/// ```
51
///
52
/// # Memory layout
53
///
54
/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
55
/// to track information about which segment of the underlying memory the
56
/// `Bytes` handle has access to.
57
///
58
/// `Bytes` keeps both a pointer to the shared state containing the full memory
59
/// slice and a pointer to the start of the region visible by the handle.
60
/// `Bytes` also tracks the length of its view into the memory.
61
///
62
/// # Sharing
63
///
64
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
65
/// how sharing/cloning is implemented in detail.
66
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
67
/// cloning the backing storage in order to share it behind multiple `Bytes`
68
/// instances.
69
///
70
/// For `Bytes` implementations which refer to constant memory (e.g. created
71
/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
72
///
73
/// For `Bytes` implementations which point to a reference counted shared storage
74
/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
75
/// reference count.
76
///
77
/// Due to this mechanism, multiple `Bytes` instances may point to the same
78
/// shared memory region.
79
/// Each `Bytes` instance can point to different sections within that
80
/// memory region, and `Bytes` instances may or may not have overlapping views
81
/// into the memory.
82
///
83
/// The following diagram visualizes a scenario where 2 `Bytes` instances make
84
/// use of an `Arc`-based backing storage, and provide access to different views:
85
///
86
/// ```text
87
///
88
///    Arc ptrs                   ┌─────────┐
89
///    ________________________ / │ Bytes 2 │
90
///   /                           └─────────┘
91
///  /          ┌───────────┐     |         |
92
/// |_________/ │  Bytes 1  │     |         |
93
/// |           └───────────┘     |         |
94
/// |           |           | ___/ data     | tail
95
/// |      data |      tail |/              |
96
/// v           v           v               v
97
/// ┌─────┬─────┬───────────┬───────────────┬─────┐
98
/// │ Arc │     │           │               │     │
99
/// └─────┴─────┴───────────┴───────────────┴─────┘
100
/// ```
101
pub struct Bytes {
102
    ptr: *const u8,
103
    len: usize,
104
    // inlined "trait object"
105
    data: AtomicPtr<()>,
106
    vtable: &'static Vtable,
107
}
108
109
pub(crate) struct Vtable {
110
    /// fn(data, ptr, len)
111
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
112
    /// fn(data, ptr, len)
113
    ///
114
    /// `into_*` consumes the `Bytes`, returning the respective value.
115
    pub into_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
116
    pub into_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
117
    /// fn(data)
118
    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
119
    /// fn(data, ptr, len)
120
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
121
}
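
The Sharing section above and the `Vtable` just shown describe how `clone` dispatches through `vtable.clone` instead of copying the payload, so cloned or sliced handles view the same backing memory. A brief usage sketch of that behavior through the public API (an editor's illustration, not part of bytes.rs; the pointer-equality assert relies on `static_clone` reusing the same pointer, as shown later in this file):

    use bytes::Bytes;

    fn main() {
        let a = Bytes::from_static(b"hello world");
        let b = a.clone();     // dispatches to static_clone: same pointer, no allocation
        let c = a.slice(0..5); // shares the same backing memory

        assert_eq!(b, a);
        assert_eq!(c, &b"hello"[..]);
        // All three handles read from the same static storage.
        assert_eq!(a.as_ptr(), b.as_ptr());
    }
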
122
123
impl Bytes {
124
    /// Creates a new empty `Bytes`.
125
    ///
126
    /// This will not allocate and the returned `Bytes` handle will be empty.
127
    ///
128
    /// # Examples
129
    ///
130
    /// ```
131
    /// use bytes::Bytes;
132
    ///
133
    /// let b = Bytes::new();
134
    /// assert_eq!(&b[..], b"");
135
    /// ```
136
    #[inline]
137
    #[cfg(not(all(loom, test)))]
138
24
    pub const fn new() -> Self {
139
        // Make it a named const to work around
140
        // "unsizing casts are not allowed in const fn"
141
        const EMPTY: &[u8] = &[];
142
24
        Bytes::from_static(EMPTY)
143
24
    }
Unexecuted instantiation: <bytes::bytes::Bytes>::new (x2)
Executed instantiation: <bytes::bytes::Bytes>::new (count 24, source lines 138-143)
144
145
    /// Creates a new empty `Bytes`.
146
    #[cfg(all(loom, test))]
147
    pub fn new() -> Self {
148
        const EMPTY: &[u8] = &[];
149
        Bytes::from_static(EMPTY)
150
    }
151
152
    /// Creates a new `Bytes` from a static slice.
153
    ///
154
    /// The returned `Bytes` will point directly to the static slice. There is
155
    /// no allocating or copying.
156
    ///
157
    /// # Examples
158
    ///
159
    /// ```
160
    /// use bytes::Bytes;
161
    ///
162
    /// let b = Bytes::from_static(b"hello");
163
    /// assert_eq!(&b[..], b"hello");
164
    /// ```
165
    #[inline]
166
    #[cfg(not(all(loom, test)))]
167
421
    pub const fn from_static(bytes: &'static [u8]) -> Self {
168
421
        Bytes {
169
421
            ptr: bytes.as_ptr(),
170
421
            len: bytes.len(),
171
421
            data: AtomicPtr::new(ptr::null_mut()),
172
421
            vtable: &STATIC_VTABLE,
173
421
        }
174
421
    }
Executed instantiation: <bytes::bytes::Bytes>::from_static (count 162, source lines 167-174)
Unexecuted instantiation: <bytes::bytes::Bytes>::from_static
Executed instantiation: <bytes::bytes::Bytes>::from_static (count 259, source lines 167-174)
175
176
    /// Creates a new `Bytes` from a static slice.
177
    #[cfg(all(loom, test))]
178
    pub fn from_static(bytes: &'static [u8]) -> Self {
179
        Bytes {
180
            ptr: bytes.as_ptr(),
181
            len: bytes.len(),
182
            data: AtomicPtr::new(ptr::null_mut()),
183
            vtable: &STATIC_VTABLE,
184
        }
185
    }
186
187
    /// Creates a new `Bytes` with length zero and the given pointer as the address.
188
226
    fn new_empty_with_ptr(ptr: *const u8) -> Self {
189
226
        debug_assert!(!ptr.is_null());
190
191
        // Detach this pointer's provenance from whichever allocation it came from, and reattach it
192
        // to the provenance of the fake ZST [u8;0] at the same address.
193
226
        let ptr = without_provenance(ptr as usize);
194
195
226
        Bytes {
196
226
            ptr,
197
226
            len: 0,
198
226
            data: AtomicPtr::new(ptr::null_mut()),
199
226
            vtable: &STATIC_VTABLE,
200
226
        }
201
226
    }
202
203
    /// Create [Bytes] with a buffer whose lifetime is controlled
204
    /// via an explicit owner.
205
    ///
206
    /// A common use case is to zero-copy construct from mapped memory.
207
    ///
208
    /// ```
209
    /// # struct File;
210
    /// #
211
    /// # impl File {
212
    /// #     pub fn open(_: &str) -> Result<Self, ()> {
213
    /// #         Ok(Self)
214
    /// #     }
215
    /// # }
216
    /// #
217
    /// # mod memmap2 {
218
    /// #     pub struct Mmap;
219
    /// #
220
    /// #     impl Mmap {
221
    /// #         pub unsafe fn map(_file: &super::File) -> Result<Self, ()> {
222
    /// #             Ok(Self)
223
    /// #         }
224
    /// #     }
225
    /// #
226
    /// #     impl AsRef<[u8]> for Mmap {
227
    /// #         fn as_ref(&self) -> &[u8] {
228
    /// #             b"buf"
229
    /// #         }
230
    /// #     }
231
    /// # }
232
    /// use bytes::Bytes;
233
    /// use memmap2::Mmap;
234
    ///
235
    /// # fn main() -> Result<(), ()> {
236
    /// let file = File::open("upload_bundle.tar.gz")?;
237
    /// let mmap = unsafe { Mmap::map(&file) }?;
238
    /// let b = Bytes::from_owner(mmap);
239
    /// # Ok(())
240
    /// # }
241
    /// ```
242
    ///
243
    /// The `owner` will be transferred to the constructed [Bytes] object, which
244
    /// will ensure it is dropped once all remaining clones of the constructed
245
    /// object are dropped. The owner will then be responsible for dropping the
246
    /// specified region of memory as part of its [Drop] implementation.
247
    ///
248
    /// Note that converting [Bytes] constructed from an owner into a [BytesMut]
249
    /// will always create a deep copy of the buffer into newly allocated memory.
250
0
    pub fn from_owner<T>(owner: T) -> Self
251
0
    where
252
0
        T: AsRef<[u8]> + Send + 'static,
253
    {
254
        // Safety & Miri:
255
        // The ownership of `owner` is first transferred to the `Owned` wrapper and `Bytes` object.
256
        // This ensures that the owner is pinned in memory, allowing us to call `.as_ref()` safely
257
        // since the lifetime of the owner is controlled by the lifetime of the new `Bytes` object,
258
        // and the lifetime of the resulting borrowed `&[u8]` matches that of the owner.
259
        // Note that this remains safe so long as we only call `.as_ref()` once.
260
        //
261
        // There are some additional special considerations here:
262
        //   * We rely on Bytes's Drop impl to clean up memory should `.as_ref()` panic.
263
        //   * Setting the `ptr` and `len` on the bytes object last (after moving the owner to
264
        //     Bytes) allows Miri checks to pass since it avoids obtaining the `&[u8]` slice
265
        //     from a stack-owned Box.
266
        // More details on this: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813375863
267
        //                  and: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813316032
268
269
0
        let owned = Box::into_raw(Box::new(Owned {
270
0
            ref_cnt: AtomicUsize::new(1),
271
0
            owner,
272
0
        }));
273
274
0
        let mut ret = Bytes {
275
0
            ptr: NonNull::dangling().as_ptr(),
276
0
            len: 0,
277
0
            data: AtomicPtr::new(owned.cast()),
278
0
            vtable: &Owned::<T>::VTABLE,
279
0
        };
280
281
0
        let buf = unsafe { &*owned }.owner.as_ref();
282
0
        ret.ptr = buf.as_ptr();
283
0
        ret.len = buf.len();
284
285
0
        ret
286
0
    }
287
288
    /// Returns the number of bytes contained in this `Bytes`.
289
    ///
290
    /// # Examples
291
    ///
292
    /// ```
293
    /// use bytes::Bytes;
294
    ///
295
    /// let b = Bytes::from(&b"hello"[..]);
296
    /// assert_eq!(b.len(), 5);
297
    /// ```
298
    #[inline]
299
962
    pub const fn len(&self) -> usize {
300
962
        self.len
301
962
    }
Executed instantiation: <bytes::bytes::Bytes>::len (count 339, source lines 299-301)
Unexecuted instantiation: <bytes::bytes::Bytes>::len (x2)
Executed instantiation: <bytes::bytes::Bytes>::len (count 283, source lines 299-301)
Executed instantiation: <bytes::bytes::Bytes>::len (count 340, source lines 299-301)
302
303
    /// Returns true if the `Bytes` has a length of 0.
304
    ///
305
    /// # Examples
306
    ///
307
    /// ```
308
    /// use bytes::Bytes;
309
    ///
310
    /// let b = Bytes::new();
311
    /// assert!(b.is_empty());
312
    /// ```
313
    #[inline]
314
0
    pub const fn is_empty(&self) -> bool {
315
0
        self.len == 0
316
0
    }
317
318
    /// Returns true if this is the only reference to the data and
319
    /// `Into<BytesMut>` would avoid cloning the underlying buffer.
320
    ///
321
    /// Always returns false if the data is backed by a [static slice](Bytes::from_static),
322
    /// or an [owner](Bytes::from_owner).
323
    ///
324
    /// The result of this method may be invalidated immediately if another
325
    /// thread clones this value while this is being called. Ensure you have
326
    /// unique access to this value (`&mut Bytes`) first if you need to be
327
    /// certain the result is valid (i.e. for safety reasons).
328
    /// # Examples
329
    ///
330
    /// ```
331
    /// use bytes::Bytes;
332
    ///
333
    /// let a = Bytes::from(vec![1, 2, 3]);
334
    /// assert!(a.is_unique());
335
    /// let b = a.clone();
336
    /// assert!(!a.is_unique());
337
    /// ```
338
0
    pub fn is_unique(&self) -> bool {
339
0
        unsafe { (self.vtable.is_unique)(&self.data) }
340
0
    }
341
342
    /// Creates `Bytes` instance from slice, by copying it.
343
0
    pub fn copy_from_slice(data: &[u8]) -> Self {
344
0
        data.to_vec().into()
345
0
    }
346
347
    /// Returns a slice of self for the provided range.
348
    ///
349
    /// This will increment the reference count for the underlying memory and
350
    /// return a new `Bytes` handle set to the slice.
351
    ///
352
    /// This operation is `O(1)`.
353
    ///
354
    /// # Examples
355
    ///
356
    /// ```
357
    /// use bytes::Bytes;
358
    ///
359
    /// let a = Bytes::from(&b"hello world"[..]);
360
    /// let b = a.slice(2..5);
361
    ///
362
    /// assert_eq!(&b[..], b"llo");
363
    /// ```
364
    ///
365
    /// # Panics
366
    ///
367
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
368
    /// will panic.
369
297
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
370
        use core::ops::Bound;
371
372
297
        let len = self.len();
373
374
297
        let begin = match range.start_bound() {
375
297
            Bound::Included(&n) => n,
376
0
            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
377
0
            Bound::Unbounded => 0,
378
        };
379
380
297
        let end = match range.end_bound() {
381
0
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
382
0
            Bound::Excluded(&n) => n,
383
297
            Bound::Unbounded => len,
384
        };
385
386
297
        assert!(
387
297
            begin <= end,
388
0
            "range start must not be greater than end: {:?} <= {:?}",
389
            begin,
390
            end,
391
        );
392
297
        assert!(
393
297
            end <= len,
394
0
            "range end out of bounds: {:?} <= {:?}",
395
            end,
396
            len,
397
        );
398
399
297
        if end == begin {
400
226
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(begin));
401
71
        }
402
403
71
        let mut ret = self.clone();
404
405
71
        ret.len = end - begin;
406
71
        ret.ptr = unsafe { ret.ptr.add(begin) };
407
408
71
        ret
409
297
    }
Executed instantiation: <bytes::bytes::Bytes>::slice::<core::ops::range::RangeFrom<usize>> (count 149, source lines 369-409; 112 calls returned an empty slice, 37 took the clone path)
Unexecuted instantiation: <bytes::bytes::Bytes>::slice::<core::ops::range::Range<usize>>
Executed instantiation: <bytes::bytes::Bytes>::slice::<core::ops::range::RangeFrom<usize>> (count 148, source lines 369-409; 114 calls returned an empty slice, 34 took the clone path)
410
411
    /// Returns a slice of self that is equivalent to the given `subset`.
412
    ///
413
    /// When processing a `Bytes` buffer with other tools, one often gets a
414
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
415
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
416
    /// called `self.slice()` with the offsets that correspond to `subset`.
417
    ///
418
    /// This operation is `O(1)`.
419
    ///
420
    /// # Examples
421
    ///
422
    /// ```
423
    /// use bytes::Bytes;
424
    ///
425
    /// let bytes = Bytes::from(&b"012345678"[..]);
426
    /// let as_slice = bytes.as_ref();
427
    /// let subset = &as_slice[2..6];
428
    /// let subslice = bytes.slice_ref(&subset);
429
    /// assert_eq!(&subslice[..], b"2345");
430
    /// ```
431
    ///
432
    /// # Panics
433
    ///
434
    /// Requires that the given `sub` slice is in fact contained within the
435
    /// `Bytes` buffer; otherwise this function will panic.
436
0
    pub fn slice_ref(&self, subset: &[u8]) -> Self {
437
        // Empty slice and empty Bytes may have their pointers reset
438
        // so explicitly allow empty slice to be a subslice of any slice.
439
0
        if subset.is_empty() {
440
0
            return Bytes::new();
441
0
        }
442
443
0
        let bytes_p = self.as_ptr() as usize;
444
0
        let bytes_len = self.len();
445
446
0
        let sub_p = subset.as_ptr() as usize;
447
0
        let sub_len = subset.len();
448
449
0
        assert!(
450
0
            sub_p >= bytes_p,
451
0
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
452
0
            subset.as_ptr(),
453
0
            self.as_ptr(),
454
        );
455
0
        assert!(
456
0
            sub_p + sub_len <= bytes_p + bytes_len,
457
0
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
458
0
            self.as_ptr(),
459
            bytes_len,
460
0
            subset.as_ptr(),
461
            sub_len,
462
        );
463
464
0
        let sub_offset = sub_p - bytes_p;
465
466
0
        self.slice(sub_offset..(sub_offset + sub_len))
467
0
    }
468
469
    /// Splits the bytes into two at the given index.
470
    ///
471
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
472
    /// contains elements `[at, len)`. It's guaranteed that the memory does not
473
    /// move, that is, the address of `self` does not change, and the address of
474
    /// the returned slice is `at` bytes after that.
475
    ///
476
    /// This is an `O(1)` operation that just increases the reference count and
477
    /// sets a few indices.
478
    ///
479
    /// # Examples
480
    ///
481
    /// ```
482
    /// use bytes::Bytes;
483
    ///
484
    /// let mut a = Bytes::from(&b"hello world"[..]);
485
    /// let b = a.split_off(5);
486
    ///
487
    /// assert_eq!(&a[..], b"hello");
488
    /// assert_eq!(&b[..], b" world");
489
    /// ```
490
    ///
491
    /// # Panics
492
    ///
493
    /// Panics if `at > len`.
494
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
495
0
    pub fn split_off(&mut self, at: usize) -> Self {
496
0
        if at == self.len() {
497
0
            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
498
0
        }
499
500
0
        if at == 0 {
501
0
            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
502
0
        }
503
504
0
        assert!(
505
0
            at <= self.len(),
506
0
            "split_off out of bounds: {:?} <= {:?}",
507
            at,
508
0
            self.len(),
509
        );
510
511
0
        let mut ret = self.clone();
512
513
0
        self.len = at;
514
515
0
        unsafe { ret.inc_start(at) };
516
517
0
        ret
518
0
    }
519
520
    /// Splits the bytes into two at the given index.
521
    ///
522
    /// Afterwards `self` contains elements `[at, len)`, and the returned
523
    /// `Bytes` contains elements `[0, at)`.
524
    ///
525
    /// This is an `O(1)` operation that just increases the reference count and
526
    /// sets a few indices.
527
    ///
528
    /// # Examples
529
    ///
530
    /// ```
531
    /// use bytes::Bytes;
532
    ///
533
    /// let mut a = Bytes::from(&b"hello world"[..]);
534
    /// let b = a.split_to(5);
535
    ///
536
    /// assert_eq!(&a[..], b" world");
537
    /// assert_eq!(&b[..], b"hello");
538
    /// ```
539
    ///
540
    /// # Panics
541
    ///
542
    /// Panics if `at > len`.
543
    #[must_use = "consider Bytes::advance if you don't need the other half"]
544
0
    pub fn split_to(&mut self, at: usize) -> Self {
545
0
        if at == self.len() {
546
0
            let end_ptr = self.ptr.wrapping_add(at);
547
0
            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
548
0
        }
549
550
0
        if at == 0 {
551
0
            return Bytes::new_empty_with_ptr(self.ptr);
552
0
        }
553
554
0
        assert!(
555
0
            at <= self.len(),
556
0
            "split_to out of bounds: {:?} <= {:?}",
557
            at,
558
0
            self.len(),
559
        );
560
561
0
        let mut ret = self.clone();
562
563
0
        unsafe { self.inc_start(at) };
564
565
0
        ret.len = at;
566
0
        ret
567
0
    }
568
569
    /// Shortens the buffer, keeping the first `len` bytes and dropping the
570
    /// rest.
571
    ///
572
    /// If `len` is greater than the buffer's current length, this has no
573
    /// effect.
574
    ///
575
    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
576
    /// excess bytes to be returned instead of dropped.
577
    ///
578
    /// # Examples
579
    ///
580
    /// ```
581
    /// use bytes::Bytes;
582
    ///
583
    /// let mut buf = Bytes::from(&b"hello world"[..]);
584
    /// buf.truncate(5);
585
    /// assert_eq!(buf, b"hello"[..]);
586
    /// ```
587
    #[inline]
588
0
    pub fn truncate(&mut self, len: usize) {
589
0
        if len < self.len {
590
            // The Vec "promotable" vtables do not store the capacity,
591
            // so we cannot truncate while using this repr. We *have* to
592
            // promote using `split_off` so the capacity can be stored.
593
0
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
594
0
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
595
0
            {
596
0
                drop(self.split_off(len));
597
0
            } else {
598
0
                self.len = len;
599
0
            }
600
0
        }
601
0
    }
Unexecuted instantiation: <bytes::bytes::Bytes>::truncate (x2)
602
603
    /// Clears the buffer, removing all data.
604
    ///
605
    /// # Examples
606
    ///
607
    /// ```
608
    /// use bytes::Bytes;
609
    ///
610
    /// let mut buf = Bytes::from(&b"hello world"[..]);
611
    /// buf.clear();
612
    /// assert!(buf.is_empty());
613
    /// ```
614
    #[inline]
615
0
    pub fn clear(&mut self) {
616
0
        self.truncate(0);
617
0
    }
618
619
    /// Try to convert self into `BytesMut`.
620
    ///
621
    /// If `self` is unique for the entire original buffer, this will succeed
622
    /// and return a `BytesMut` with the contents of `self` without copying.
623
    /// If `self` is not unique for the entire original buffer, this will fail
624
    /// and return self.
625
    ///
626
    /// This will also always fail if the buffer was constructed via either
627
    /// [from_owner](Bytes::from_owner) or [from_static](Bytes::from_static).
628
    ///
629
    /// # Examples
630
    ///
631
    /// ```
632
    /// use bytes::{Bytes, BytesMut};
633
    ///
634
    /// let bytes = Bytes::from(b"hello".to_vec());
635
    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
636
    /// ```
637
0
    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
638
0
        if self.is_unique() {
639
0
            Ok(self.into())
640
        } else {
641
0
            Err(self)
642
        }
643
0
    }
644
645
    #[inline]
646
8.38M
    pub(crate) unsafe fn with_vtable(
647
8.38M
        ptr: *const u8,
648
8.38M
        len: usize,
649
8.38M
        data: AtomicPtr<()>,
650
8.38M
        vtable: &'static Vtable,
651
8.38M
    ) -> Bytes {
652
8.38M
        Bytes {
653
8.38M
            ptr,
654
8.38M
            len,
655
8.38M
            data,
656
8.38M
            vtable,
657
8.38M
        }
658
8.38M
    }
Executed instantiation: <bytes::bytes::Bytes>::with_vtable (count 1.58M, source lines 646-658)
Unexecuted instantiation: <bytes::bytes::Bytes>::with_vtable
Executed instantiation: <bytes::bytes::Bytes>::with_vtable (count 172, source lines 646-658)
Executed instantiation: <bytes::bytes::Bytes>::with_vtable (count 6.79M, source lines 646-658)
659
660
    // private
661
662
    #[inline]
663
16.7M
    fn as_slice(&self) -> &[u8] {
664
16.7M
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
665
16.7M
    }
Executed instantiation: <bytes::bytes::Bytes>::as_slice (count 3.17M, source lines 663-665)
Executed instantiation: <bytes::bytes::Bytes>::as_slice (count 595, source lines 663-665)
Unexecuted instantiation: <bytes::bytes::Bytes>::as_slice (x2)
Executed instantiation: <bytes::bytes::Bytes>::as_slice (count 13.5M, source lines 663-665)
666
667
    #[inline]
668
283
    unsafe fn inc_start(&mut self, by: usize) {
669
        // should already be asserted, but debug assert for tests
670
283
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
671
283
        self.len -= by;
672
283
        self.ptr = self.ptr.add(by);
673
283
    }
Unexecuted instantiation: <bytes::bytes::Bytes>::inc_start (x2)
Executed instantiation: <bytes::bytes::Bytes>::inc_start (count 283, source lines 668-673)
Unexecuted instantiation: <bytes::bytes::Bytes>::inc_start
674
}
675
676
// Vtable must enforce this behavior
677
unsafe impl Send for Bytes {}
678
unsafe impl Sync for Bytes {}
679
680
impl Drop for Bytes {
681
    #[inline]
682
8.38M
    fn drop(&mut self) {
683
8.38M
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
684
8.38M
    }
685
}
686
687
impl Clone for Bytes {
688
    #[inline]
689
407
    fn clone(&self) -> Bytes {
690
407
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
691
407
    }
Executed instantiation: <bytes::bytes::Bytes as core::clone::Clone>::clone (count 68, source lines 689-691)
Executed instantiation: <bytes::bytes::Bytes as core::clone::Clone>::clone (count 268, source lines 689-691)
Unexecuted instantiation: <bytes::bytes::Bytes as core::clone::Clone>::clone (x2)
Executed instantiation: <bytes::bytes::Bytes as core::clone::Clone>::clone (count 71, source lines 689-691)
692
}
693
694
impl Buf for Bytes {
695
    #[inline]
696
0
    fn remaining(&self) -> usize {
697
0
        self.len()
698
0
    }
699
700
    #[inline]
701
0
    fn chunk(&self) -> &[u8] {
702
0
        self.as_slice()
703
0
    }
704
705
    #[inline]
706
283
    fn advance(&mut self, cnt: usize) {
707
283
        assert!(
708
283
            cnt <= self.len(),
709
0
            "cannot advance past `remaining`: {:?} <= {:?}",
710
            cnt,
711
0
            self.len(),
712
        );
713
714
283
        unsafe {
715
283
            self.inc_start(cnt);
716
283
        }
717
283
    }
Unexecuted instantiation: <bytes::bytes::Bytes as bytes::buf::buf_impl::Buf>::advance (x2)
Executed instantiation: <bytes::bytes::Bytes as bytes::buf::buf_impl::Buf>::advance (count 283, source lines 706-717)
Unexecuted instantiation: <bytes::bytes::Bytes as bytes::buf::buf_impl::Buf>::advance
718
719
0
    fn copy_to_bytes(&mut self, len: usize) -> Self {
720
0
        self.split_to(len)
721
0
    }
722
}
723
724
impl Deref for Bytes {
725
    type Target = [u8];
726
727
    #[inline]
728
1.90k
    fn deref(&self) -> &[u8] {
729
1.90k
        self.as_slice()
730
1.90k
    }
Executed instantiation: <bytes::bytes::Bytes as core::ops::deref::Deref>::deref (count 653, source lines 728-730)
Executed instantiation: <bytes::bytes::Bytes as core::ops::deref::Deref>::deref (count 595, source lines 728-730)
Unexecuted instantiation: <bytes::bytes::Bytes as core::ops::deref::Deref>::deref (x2)
Executed instantiation: <bytes::bytes::Bytes as core::ops::deref::Deref>::deref (count 653, source lines 728-730)
731
}
732
733
impl AsRef<[u8]> for Bytes {
734
    #[inline]
735
16.7M
    fn as_ref(&self) -> &[u8] {
736
16.7M
        self.as_slice()
737
16.7M
    }
Executed instantiation: <bytes::bytes::Bytes as core::convert::AsRef<[u8]>>::as_ref (count 3.16M, source lines 735-737)
Unexecuted instantiation: <bytes::bytes::Bytes as core::convert::AsRef<[u8]>>::as_ref (x3)
Executed instantiation: <bytes::bytes::Bytes as core::convert::AsRef<[u8]>>::as_ref (count 13.5M, source lines 735-737)
738
}
739
740
impl hash::Hash for Bytes {
741
0
    fn hash<H>(&self, state: &mut H)
742
0
    where
743
0
        H: hash::Hasher,
744
    {
745
0
        self.as_slice().hash(state);
746
0
    }
747
}
748
749
impl Borrow<[u8]> for Bytes {
750
0
    fn borrow(&self) -> &[u8] {
751
0
        self.as_slice()
752
0
    }
753
}
754
755
impl IntoIterator for Bytes {
756
    type Item = u8;
757
    type IntoIter = IntoIter<Bytes>;
758
759
0
    fn into_iter(self) -> Self::IntoIter {
760
0
        IntoIter::new(self)
761
0
    }
762
}
763
764
impl<'a> IntoIterator for &'a Bytes {
765
    type Item = &'a u8;
766
    type IntoIter = core::slice::Iter<'a, u8>;
767
768
0
    fn into_iter(self) -> Self::IntoIter {
769
0
        self.as_slice().iter()
770
0
    }
771
}
772
773
impl FromIterator<u8> for Bytes {
774
0
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
775
0
        Vec::from_iter(into_iter).into()
776
0
    }
777
}
778
779
// impl Eq
780
781
impl PartialEq for Bytes {
782
0
    fn eq(&self, other: &Bytes) -> bool {
783
0
        self.as_slice() == other.as_slice()
784
0
    }
785
}
786
787
impl PartialOrd for Bytes {
788
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
789
0
        Some(self.cmp(other))
790
0
    }
791
}
792
793
impl Ord for Bytes {
794
0
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
795
0
        self.as_slice().cmp(other.as_slice())
796
0
    }
797
}
798
799
impl Eq for Bytes {}
800
801
impl PartialEq<[u8]> for Bytes {
802
0
    fn eq(&self, other: &[u8]) -> bool {
803
0
        self.as_slice() == other
804
0
    }
805
}
806
807
impl PartialOrd<[u8]> for Bytes {
808
0
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
809
0
        self.as_slice().partial_cmp(other)
810
0
    }
811
}
812
813
impl PartialEq<Bytes> for [u8] {
814
0
    fn eq(&self, other: &Bytes) -> bool {
815
0
        *other == *self
816
0
    }
817
}
818
819
impl PartialOrd<Bytes> for [u8] {
820
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
821
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
822
0
    }
823
}
824
825
impl PartialEq<str> for Bytes {
826
0
    fn eq(&self, other: &str) -> bool {
827
0
        self.as_slice() == other.as_bytes()
828
0
    }
829
}
830
831
impl PartialOrd<str> for Bytes {
832
0
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
833
0
        self.as_slice().partial_cmp(other.as_bytes())
834
0
    }
835
}
836
837
impl PartialEq<Bytes> for str {
838
0
    fn eq(&self, other: &Bytes) -> bool {
839
0
        *other == *self
840
0
    }
841
}
842
843
impl PartialOrd<Bytes> for str {
844
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
845
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
846
0
    }
847
}
848
849
impl PartialEq<Vec<u8>> for Bytes {
850
0
    fn eq(&self, other: &Vec<u8>) -> bool {
851
0
        *self == other[..]
852
0
    }
853
}
854
855
impl PartialOrd<Vec<u8>> for Bytes {
856
0
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
857
0
        self.as_slice().partial_cmp(&other[..])
858
0
    }
859
}
860
861
impl PartialEq<Bytes> for Vec<u8> {
862
0
    fn eq(&self, other: &Bytes) -> bool {
863
0
        *other == *self
864
0
    }
865
}
866
867
impl PartialOrd<Bytes> for Vec<u8> {
868
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
869
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
870
0
    }
871
}
872
873
impl PartialEq<String> for Bytes {
874
0
    fn eq(&self, other: &String) -> bool {
875
0
        *self == other[..]
876
0
    }
877
}
878
879
impl PartialOrd<String> for Bytes {
880
0
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
881
0
        self.as_slice().partial_cmp(other.as_bytes())
882
0
    }
883
}
884
885
impl PartialEq<Bytes> for String {
886
0
    fn eq(&self, other: &Bytes) -> bool {
887
0
        *other == *self
888
0
    }
889
}
890
891
impl PartialOrd<Bytes> for String {
892
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
893
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
894
0
    }
895
}
896
897
impl PartialEq<Bytes> for &[u8] {
898
0
    fn eq(&self, other: &Bytes) -> bool {
899
0
        *other == *self
900
0
    }
901
}
902
903
impl PartialOrd<Bytes> for &[u8] {
904
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
905
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
906
0
    }
907
}
908
909
impl PartialEq<Bytes> for &str {
910
0
    fn eq(&self, other: &Bytes) -> bool {
911
0
        *other == *self
912
0
    }
913
}
914
915
impl PartialOrd<Bytes> for &str {
916
0
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
917
0
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
918
0
    }
919
}
920
921
impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
922
where
923
    Bytes: PartialEq<T>,
924
{
925
0
    fn eq(&self, other: &&'a T) -> bool {
926
0
        *self == **other
927
0
    }
Unexecuted instantiation: <bytes::bytes::Bytes as core::cmp::PartialEq<&[u8]>>::eq
Unexecuted instantiation: <bytes::bytes::Bytes as core::cmp::PartialEq<&str>>::eq
928
}
929
930
impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
931
where
932
    Bytes: PartialOrd<T>,
933
{
934
0
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
935
0
        self.partial_cmp(&**other)
936
0
    }
937
}
938
939
// impl From
940
941
impl Default for Bytes {
942
    #[inline]
943
0
    fn default() -> Bytes {
944
0
        Bytes::new()
945
0
    }
Unexecuted instantiation: <bytes::bytes::Bytes as core::default::Default>::default (x2)
946
}
947
948
impl From<&'static [u8]> for Bytes {
949
0
    fn from(slice: &'static [u8]) -> Bytes {
950
0
        Bytes::from_static(slice)
951
0
    }
952
}
953
954
impl From<&'static str> for Bytes {
955
0
    fn from(slice: &'static str) -> Bytes {
956
0
        Bytes::from_static(slice.as_bytes())
957
0
    }
958
}
959
960
impl From<Vec<u8>> for Bytes {
961
450
    fn from(vec: Vec<u8>) -> Bytes {
962
450
        let mut vec = ManuallyDrop::new(vec);
963
450
        let ptr = vec.as_mut_ptr();
964
450
        let len = vec.len();
965
450
        let cap = vec.capacity();
966
967
        // Avoid an extra allocation if possible.
968
450
        if len == cap {
969
356
            let vec = ManuallyDrop::into_inner(vec);
970
356
            return Bytes::from(vec.into_boxed_slice());
971
94
        }
972
973
94
        let shared = Box::new(Shared {
974
94
            buf: ptr,
975
94
            cap,
976
94
            ref_cnt: AtomicUsize::new(1),
977
94
        });
978
979
94
        let shared = Box::into_raw(shared);
980
        // The pointer should be aligned, so this assert should
981
        // always succeed.
982
94
        debug_assert!(
983
0
            0 == (shared as usize & KIND_MASK),
984
0
            "internal: Box<Shared> should have an aligned pointer",
985
        );
986
94
        Bytes {
987
94
            ptr,
988
94
            len,
989
94
            data: AtomicPtr::new(shared as _),
990
94
            vtable: &SHARED_VTABLE,
991
94
        }
992
450
    }
993
}
994
995
impl From<Box<[u8]>> for Bytes {
996
356
    fn from(slice: Box<[u8]>) -> Bytes {
997
        // Box<[u8]> doesn't contain a heap allocation for empty slices,
998
        // so the pointer isn't aligned enough for the KIND_VEC stashing to
999
        // work.
1000
356
        if slice.is_empty() {
1001
24
            return Bytes::new();
1002
332
        }
1003
1004
332
        let len = slice.len();
1005
332
        let ptr = Box::into_raw(slice) as *mut u8;
1006
1007
332
        if ptr as usize & 0x1 == 0 {
1008
332
            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
1009
332
            Bytes {
1010
332
                ptr,
1011
332
                len,
1012
332
                data: AtomicPtr::new(data.cast()),
1013
332
                vtable: &PROMOTABLE_EVEN_VTABLE,
1014
332
            }
1015
        } else {
1016
0
            Bytes {
1017
0
                ptr,
1018
0
                len,
1019
0
                data: AtomicPtr::new(ptr.cast()),
1020
0
                vtable: &PROMOTABLE_ODD_VTABLE,
1021
0
            }
1022
        }
1023
356
    }
1024
}
1025
1026
impl From<Bytes> for BytesMut {
1027
    /// Convert self into `BytesMut`.
1028
    ///
1029
    /// If `bytes` is unique for the entire original buffer, this will return a
1030
    /// `BytesMut` with the contents of `bytes` without copying.
1031
    /// If `bytes` is not unique for the entire original buffer, this will make
1032
    /// a copy of `bytes` subset of the original buffer in a new `BytesMut`.
1033
    ///
1034
    /// # Examples
1035
    ///
1036
    /// ```
1037
    /// use bytes::{Bytes, BytesMut};
1038
    ///
1039
    /// let bytes = Bytes::from(b"hello".to_vec());
1040
    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
1041
    /// ```
1042
0
    fn from(bytes: Bytes) -> Self {
1043
0
        let bytes = ManuallyDrop::new(bytes);
1044
0
        unsafe { (bytes.vtable.into_mut)(&bytes.data, bytes.ptr, bytes.len) }
1045
0
    }
1046
}
1047
1048
impl From<String> for Bytes {
1049
111
    fn from(s: String) -> Bytes {
1050
111
        Bytes::from(s.into_bytes())
1051
111
    }
1052
}
1053
1054
impl From<Bytes> for Vec<u8> {
1055
0
    fn from(bytes: Bytes) -> Vec<u8> {
1056
0
        let bytes = ManuallyDrop::new(bytes);
1057
0
        unsafe { (bytes.vtable.into_vec)(&bytes.data, bytes.ptr, bytes.len) }
1058
0
    }
1059
}
1060
1061
// ===== impl Vtable =====
1062
1063
impl fmt::Debug for Vtable {
1064
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1065
0
        f.debug_struct("Vtable")
1066
0
            .field("clone", &(self.clone as *const ()))
1067
0
            .field("drop", &(self.drop as *const ()))
1068
0
            .finish()
1069
0
    }
1070
}
1071
1072
// ===== impl StaticVtable =====
1073
1074
const STATIC_VTABLE: Vtable = Vtable {
1075
    clone: static_clone,
1076
    into_vec: static_to_vec,
1077
    into_mut: static_to_mut,
1078
    is_unique: static_is_unique,
1079
    drop: static_drop,
1080
};
1081
1082
235
unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1083
235
    let slice = slice::from_raw_parts(ptr, len);
1084
235
    Bytes::from_static(slice)
1085
235
}
1086
1087
0
unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1088
0
    let slice = slice::from_raw_parts(ptr, len);
1089
0
    slice.to_vec()
1090
0
}
1091
1092
0
unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1093
0
    let slice = slice::from_raw_parts(ptr, len);
1094
0
    BytesMut::from(slice)
1095
0
}
1096
1097
0
fn static_is_unique(_: &AtomicPtr<()>) -> bool {
1098
0
    false
1099
0
}
1100
1101
647
unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
1102
    // nothing to drop for &'static [u8]
1103
647
}
1104
1105
// ===== impl OwnedVtable =====
1106
1107
#[repr(C)]
1108
struct Owned<T> {
1109
    ref_cnt: AtomicUsize,
1110
    owner: T,
1111
}
1112
1113
impl<T> Owned<T> {
1114
    const VTABLE: Vtable = Vtable {
1115
        clone: owned_clone::<T>,
1116
        into_vec: owned_to_vec::<T>,
1117
        into_mut: owned_to_mut::<T>,
1118
        is_unique: owned_is_unique,
1119
        drop: owned_drop::<T>,
1120
    };
1121
}
1122
1123
0
unsafe fn owned_clone<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1124
0
    let owned = data.load(Ordering::Relaxed);
1125
0
    let old_cnt = (*owned.cast::<AtomicUsize>()).fetch_add(1, Ordering::Relaxed);
1126
0
    if old_cnt > usize::MAX >> 1 {
1127
0
        crate::abort();
1128
0
    }
1129
1130
0
    Bytes {
1131
0
        ptr,
1132
0
        len,
1133
0
        data: AtomicPtr::new(owned as _),
1134
0
        vtable: &Owned::<T>::VTABLE,
1135
0
    }
1136
0
}
1137
1138
0
unsafe fn owned_to_vec<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1139
0
    let slice = slice::from_raw_parts(ptr, len);
1140
0
    let vec = slice.to_vec();
1141
0
    owned_drop_impl::<T>(data.load(Ordering::Relaxed));
1142
0
    vec
1143
0
}
1144
1145
0
unsafe fn owned_to_mut<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1146
0
    BytesMut::from_vec(owned_to_vec::<T>(data, ptr, len))
1147
0
}
1148
1149
0
unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool {
1150
0
    false
1151
0
}
1152
1153
0
unsafe fn owned_drop_impl<T>(owned: *mut ()) {
1154
    {
1155
0
        let ref_cnt = &*owned.cast::<AtomicUsize>();
1156
1157
0
        let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release);
1158
0
        debug_assert!(
1159
0
            old_cnt > 0 && old_cnt <= usize::MAX >> 1,
1160
0
            "expected non-zero refcount and no underflow"
1161
        );
1162
0
        if old_cnt != 1 {
1163
0
            return;
1164
0
        }
1165
0
        ref_cnt.load(Ordering::Acquire);
1166
    }
1167
1168
0
    drop(Box::<Owned<T>>::from_raw(owned.cast()));
1169
0
}
1170
1171
0
unsafe fn owned_drop<T>(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1172
0
    let owned = data.load(Ordering::Relaxed);
1173
0
    owned_drop_impl::<T>(owned);
1174
0
}
1175
1176
// ===== impl PromotableVtable =====
1177
1178
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
1179
    clone: promotable_even_clone,
1180
    into_vec: promotable_even_to_vec,
1181
    into_mut: promotable_even_to_mut,
1182
    is_unique: promotable_is_unique,
1183
    drop: promotable_even_drop,
1184
};
1185
1186
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
1187
    clone: promotable_odd_clone,
1188
    into_vec: promotable_odd_to_vec,
1189
    into_mut: promotable_odd_to_mut,
1190
    is_unique: promotable_is_unique,
1191
    drop: promotable_odd_drop,
1192
};
1193
1194
0
unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1195
0
    let shared = data.load(Ordering::Acquire);
1196
0
    let kind = shared as usize & KIND_MASK;
1197
1198
0
    if kind == KIND_ARC {
1199
0
        shallow_clone_arc(shared.cast(), ptr, len)
1200
    } else {
1201
0
        debug_assert_eq!(kind, KIND_VEC);
1202
0
        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1203
0
        shallow_clone_vec(data, shared, buf, ptr, len)
1204
    }
1205
0
}
1206
1207
0
unsafe fn promotable_to_vec(
1208
0
    data: &AtomicPtr<()>,
1209
0
    ptr: *const u8,
1210
0
    len: usize,
1211
0
    f: fn(*mut ()) -> *mut u8,
1212
0
) -> Vec<u8> {
1213
0
    let shared = data.load(Ordering::Acquire);
1214
0
    let kind = shared as usize & KIND_MASK;
1215
1216
0
    if kind == KIND_ARC {
1217
0
        shared_to_vec_impl(shared.cast(), ptr, len)
1218
    } else {
1219
        // If Bytes holds a Vec, then the offset must be 0.
1220
0
        debug_assert_eq!(kind, KIND_VEC);
1221
1222
0
        let buf = f(shared);
1223
1224
0
        let cap = ptr.offset_from(buf) as usize + len;
1225
1226
        // Copy back buffer
1227
0
        ptr::copy(ptr, buf, len);
1228
1229
0
        Vec::from_raw_parts(buf, len, cap)
1230
    }
1231
0
}
1232
1233
0
unsafe fn promotable_to_mut(
1234
0
    data: &AtomicPtr<()>,
1235
0
    ptr: *const u8,
1236
0
    len: usize,
1237
0
    f: fn(*mut ()) -> *mut u8,
1238
0
) -> BytesMut {
1239
0
    let shared = data.load(Ordering::Acquire);
1240
0
    let kind = shared as usize & KIND_MASK;
1241
1242
0
    if kind == KIND_ARC {
1243
0
        shared_to_mut_impl(shared.cast(), ptr, len)
1244
    } else {
1245
        // KIND_VEC is a view of an underlying buffer at a certain offset.
1246
        // The ptr + len always represents the end of that buffer.
1247
        // Before truncating it, it is first promoted to KIND_ARC.
1248
        // Thus, we can safely reconstruct a Vec from it without leaking memory.
1249
0
        debug_assert_eq!(kind, KIND_VEC);
1250
1251
0
        let buf = f(shared);
1252
0
        let off = ptr.offset_from(buf) as usize;
1253
0
        let cap = off + len;
1254
0
        let v = Vec::from_raw_parts(buf, cap, cap);
1255
1256
0
        let mut b = BytesMut::from_vec(v);
1257
0
        b.advance_unchecked(off);
1258
0
        b
1259
    }
1260
0
}
1261
1262
0
unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1263
0
    promotable_to_vec(data, ptr, len, |shared| {
1264
0
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1265
0
    })
1266
0
}
1267
1268
0
unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1269
0
    promotable_to_mut(data, ptr, len, |shared| {
1270
0
        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1271
0
    })
1272
0
}
1273
1274
332
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1275
332
    data.with_mut(|shared| {
1276
332
        let shared = *shared;
1277
332
        let kind = shared as usize & KIND_MASK;
1278
1279
332
        if kind == KIND_ARC {
1280
0
            release_shared(shared.cast());
1281
0
        } else {
1282
332
            debug_assert_eq!(kind, KIND_VEC);
1283
332
            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1284
332
            free_boxed_slice(buf, ptr, len);
1285
        }
1286
332
    });
1287
332
}
1288
1289
0
unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1290
0
    let shared = data.load(Ordering::Acquire);
1291
0
    let kind = shared as usize & KIND_MASK;
1292
1293
0
    if kind == KIND_ARC {
1294
0
        shallow_clone_arc(shared as _, ptr, len)
1295
    } else {
1296
0
        debug_assert_eq!(kind, KIND_VEC);
1297
0
        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
1298
    }
1299
0
}
1300
1301
0
unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1302
0
    promotable_to_vec(data, ptr, len, |shared| shared.cast())
1303
0
}
1304
1305
0
unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1306
0
    promotable_to_mut(data, ptr, len, |shared| shared.cast())
1307
0
}
1308
1309
0
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1310
0
    data.with_mut(|shared| {
1311
0
        let shared = *shared;
1312
0
        let kind = shared as usize & KIND_MASK;
1313
1314
0
        if kind == KIND_ARC {
1315
0
            release_shared(shared.cast());
1316
0
        } else {
1317
0
            debug_assert_eq!(kind, KIND_VEC);
1318
1319
0
            free_boxed_slice(shared.cast(), ptr, len);
1320
        }
1321
0
    });
1322
0
}
1323
1324
0
unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
1325
0
    let shared = data.load(Ordering::Acquire);
1326
0
    let kind = shared as usize & KIND_MASK;
1327
1328
0
    if kind == KIND_ARC {
1329
0
        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1330
0
        ref_cnt == 1
1331
    } else {
1332
0
        true
1333
    }
1334
0
}
1335
1336
332
unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
1337
332
    let cap = offset.offset_from(buf) as usize + len;
1338
332
    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
1339
332
}
1340
1341
// ===== impl SharedVtable =====
1342
1343
struct Shared {
1344
    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
1345
    buf: *mut u8,
1346
    cap: usize,
1347
    ref_cnt: AtomicUsize,
1348
}
1349
1350
impl Drop for Shared {
1351
94
    fn drop(&mut self) {
1352
94
        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
1353
94
    }
1354
}
1355
1356
// Assert that the alignment of `Shared` is divisible by 2.
1357
// This is a necessary invariant since we depend on allocating `Shared` a
1358
// shared object to implicitly carry the `KIND_ARC` flag in its pointer.
1359
// This flag is set when the LSB is 0.
1360
const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2.
1361
1362
static SHARED_VTABLE: Vtable = Vtable {
1363
    clone: shared_clone,
1364
    into_vec: shared_to_vec,
1365
    into_mut: shared_to_mut,
1366
    is_unique: shared_is_unique,
1367
    drop: shared_drop,
1368
};
1369
1370
const KIND_ARC: usize = 0b0;
1371
const KIND_VEC: usize = 0b1;
1372
const KIND_MASK: usize = 0b1;
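
KIND_ARC, KIND_VEC and KIND_MASK implement the low-bit tagging that the alignment assert above makes sound: because `Shared` (and the boxed slices handled by the promotable vtables) are at least 2-byte aligned, the LSB of the `data` pointer is free to record which kind of backing storage it refers to. A standalone sketch of the tagging arithmetic (editor's illustration only, not code from this crate):

    // Stash a 1-bit flag in the low bit of an even-aligned allocation's address.
    const KIND_VEC: usize = 0b1;
    const KIND_MASK: usize = 0b1;

    fn main() {
        let boxed: Box<u64> = Box::new(7); // align_of::<u64>() >= 2, so the LSB is 0
        let addr = Box::into_raw(boxed) as usize;
        assert_eq!(addr & KIND_MASK, 0, "allocation must be even-aligned");

        let tagged = addr | KIND_VEC;             // set the flag
        assert_eq!(tagged & KIND_MASK, KIND_VEC); // inspect the flag
        let untagged = tagged & !KIND_MASK;       // recover the original address
        assert_eq!(untagged, addr);

        // Rebuild the Box so the allocation is freed.
        drop(unsafe { Box::from_raw(untagged as *mut u64) });
    }
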
1373
1374
0
unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1375
0
    let shared = data.load(Ordering::Relaxed);
1376
0
    shallow_clone_arc(shared as _, ptr, len)
1377
0
}
1378
1379
0
unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
1380
    // Check that the ref_cnt is 1 (unique).
1381
    //
1382
    // If it is unique, then it is set to 0 with AcqRel fence for the same
1383
    // reason in release_shared.
1384
    //
1385
    // Otherwise, we take the other branch and call release_shared.
1386
0
    if (*shared)
1387
0
        .ref_cnt
1388
0
        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
1389
0
        .is_ok()
1390
    {
1391
        // Deallocate the `Shared` instance without running its destructor.
1392
0
        let shared = *Box::from_raw(shared);
1393
0
        let shared = ManuallyDrop::new(shared);
1394
0
        let buf = shared.buf;
1395
0
        let cap = shared.cap;
1396
1397
        // Copy the visible bytes back to the start of the buffer.
1398
0
        ptr::copy(ptr, buf, len);
1399
1400
0
        Vec::from_raw_parts(buf, len, cap)
1401
    } else {
1402
0
        let v = slice::from_raw_parts(ptr, len).to_vec();
1403
0
        release_shared(shared);
1404
0
        v
1405
    }
1406
0
}
1407
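The branch above has a direct safe-Rust analogue. A sketch of the same "reclaim if unique, otherwise copy and release" decision, with `Arc<Vec<u8>>` standing in for `Shared` (an analogy, not the crate's code):

```rust
use std::sync::Arc;

fn into_vec(shared: Arc<Vec<u8>>, off: usize, len: usize) -> Vec<u8> {
    match Arc::try_unwrap(shared) {
        Ok(mut v) => {
            // Unique: move the visible window to the front and reuse the
            // allocation, mirroring the `ptr::copy` + `Vec::from_raw_parts`
            // path in `shared_to_vec_impl`.
            v.copy_within(off..off + len, 0);
            v.truncate(len);
            v
        }
        // Still shared: copy the visible bytes; dropping the `Arc` plays the
        // role of `release_shared`.
        Err(shared) => shared[off..off + len].to_vec(),
    }
}
```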
1408
0
unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1409
0
    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1410
0
}
1411
1412
0
unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
1413
    // The goal is to check if the current handle is the only handle
1414
    // that currently has access to the buffer. This is done by
1415
    // checking if the `ref_cnt` is currently 1.
1416
    //
1417
    // The `Acquire` ordering synchronizes with the `Release` as
1418
    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1419
    // operation guarantees that any mutations done in other threads
1420
    // are ordered before the `ref_cnt` is decremented. As such,
1421
    // this `Acquire` will guarantee that those mutations are
1422
    // visible to the current thread.
1423
    //
1424
    // Otherwise, we take the other branch, copy the data and call `release_shared`.
1425
0
    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
1426
        // Deallocate the `Shared` instance without running its destructor.
1427
0
        let shared = *Box::from_raw(shared);
1428
0
        let shared = ManuallyDrop::new(shared);
1429
0
        let buf = shared.buf;
1430
0
        let cap = shared.cap;
1431
1432
        // Rebuild Vec
1433
0
        let off = ptr.offset_from(buf) as usize;
1434
0
        let v = Vec::from_raw_parts(buf, len + off, cap);
1435
1436
0
        let mut b = BytesMut::from_vec(v);
1437
0
        b.advance_unchecked(off);
1438
0
        b
1439
    } else {
1440
        // Copy the data from `Shared` into a new `Vec`, then release it.
1441
0
        let v = slice::from_raw_parts(ptr, len).to_vec();
1442
0
        release_shared(shared);
1443
0
        BytesMut::from_vec(v)
1444
    }
1445
0
}
1446
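From the user's side, this path is reached by converting a `Bytes` into a `BytesMut`. A hedged sketch, assuming the public `Bytes::try_into_mut` available in recent `bytes` releases, which only succeeds when the handle is unique:

```rust
use bytes::{Bytes, BytesMut};

fn make_mutable(b: Bytes) -> BytesMut {
    match b.try_into_mut() {
        // Sole owner: the original allocation is reused without copying.
        Ok(m) => m,
        // Still shared elsewhere: fall back to an explicit copy.
        Err(shared) => BytesMut::from(&shared[..]),
    }
}
```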
1447
0
unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1448
0
    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1449
0
}
1450
1451
0
pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
1452
0
    let shared = data.load(Ordering::Acquire);
1453
0
    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1454
0
    ref_cnt == 1
1455
0
}
1456
1457
94
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1458
94
    data.with_mut(|shared| {
1459
94
        release_shared(shared.cast());
1460
94
    });
1461
94
}
1462
1463
0
unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
1464
0
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
1465
1466
0
    if old_size > usize::MAX >> 1 {
1467
0
        crate::abort();
1468
0
    }
1469
1470
0
    Bytes {
1471
0
        ptr,
1472
0
        len,
1473
0
        data: AtomicPtr::new(shared as _),
1474
0
        vtable: &SHARED_VTABLE,
1475
0
    }
1476
0
}
1477
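The `Relaxed` increment plus overflow abort above is the standard `Arc`-style clone protocol. A minimal sketch of the same guard in isolation (illustrative only):

```rust
use std::process::abort;
use std::sync::atomic::{AtomicUsize, Ordering};

// A Relaxed increment is sufficient because the new handle is created from an
// existing live handle, but a runaway count (e.g. from forgotten clones) must
// abort before it can wrap around and cause a premature free.
fn acquire_ref(ref_cnt: &AtomicUsize) {
    let old = ref_cnt.fetch_add(1, Ordering::Relaxed);
    if old > usize::MAX >> 1 {
        abort();
    }
}
```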
1478
#[cold]
1479
0
unsafe fn shallow_clone_vec(
1480
0
    atom: &AtomicPtr<()>,
1481
0
    ptr: *const (),
1482
0
    buf: *mut u8,
1483
0
    offset: *const u8,
1484
0
    len: usize,
1485
0
) -> Bytes {
1486
    // If the buffer is still tracked in a `Vec<u8>`, it is time to
1487
    // promote the vec to an `Arc`. This could potentially be called
1488
    // concurrently, so some care must be taken.
1489
1490
    // First, allocate a new `Shared` instance containing the
1491
    // `Vec` fields. It's important to note that `ptr`, `len`,
1492
    // and `cap` cannot be mutated without having `&mut self`.
1493
    // This means that these fields will not be concurrently
1494
    // updated and since the buffer hasn't been promoted to an
1495
    // `Arc`, those three fields still are the components of the
1496
    // vector.
1497
0
    let shared = Box::new(Shared {
1498
0
        buf,
1499
0
        cap: offset.offset_from(buf) as usize + len,
1500
0
        // Initialize refcount to 2. One for this reference, and one
1501
0
        // for the new clone that will be returned from
1502
0
        // `shallow_clone`.
1503
0
        ref_cnt: AtomicUsize::new(2),
1504
0
    });
1505
1506
0
    let shared = Box::into_raw(shared);
1507
1508
    // The pointer should be aligned, so this assert should
1509
    // always succeed.
1510
0
    debug_assert!(
1511
0
        0 == (shared as usize & KIND_MASK),
1512
0
        "internal: Box<Shared> should have an aligned pointer",
1513
    );
1514
1515
    // Try compare & swapping the pointer into the `arc` field.
1516
    // `Release` is used to synchronize with other threads that
1517
    // will load the `arc` field.
1518
    //
1519
    // If the `compare_exchange` fails, then the thread lost the
1520
    // race to promote the buffer to shared. The `Acquire`
1521
    // ordering will synchronize with the `compare_exchange`
1522
    // that happened in the other thread and the `Shared`
1523
    // pointed to by `actual` will be visible.
1524
0
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
1525
0
        Ok(actual) => {
1526
0
            debug_assert!(core::ptr::eq(actual, ptr));
1527
            // The upgrade was successful; the new handle can be
1528
            // returned.
1529
0
            Bytes {
1530
0
                ptr: offset,
1531
0
                len,
1532
0
                data: AtomicPtr::new(shared as _),
1533
0
                vtable: &SHARED_VTABLE,
1534
0
            }
1535
        }
1536
0
        Err(actual) => {
1537
            // The upgrade failed: a concurrent clone happened. Release
1538
            // the allocation that was made in this thread; it will not
1539
            // be needed.
1540
0
            let shared = Box::from_raw(shared);
1541
0
            mem::forget(*shared);
1542
1543
            // Buffer already promoted to shared storage, so increment ref
1544
            // count.
1545
0
            shallow_clone_arc(actual as _, offset, len)
1546
        }
1547
    }
1548
0
}
1549
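The comments above describe a lazy-promotion race: every racing thread may allocate a candidate `Shared`, but only one `compare_exchange` wins; the losers free their candidate and reuse the winner's pointer. A minimal standalone sketch of that pattern (a hypothetical `get_or_install` helper, not the crate's code):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

fn get_or_install(slot: &AtomicPtr<u64>, make: impl FnOnce() -> u64) -> *mut u64 {
    let current = slot.load(Ordering::Acquire);
    if !current.is_null() {
        return current; // already promoted by someone else
    }
    let candidate = Box::into_raw(Box::new(make()));
    match slot.compare_exchange(
        ptr::null_mut(),
        candidate,
        Ordering::AcqRel,  // publish the candidate on success
        Ordering::Acquire, // observe the winner's value on failure
    ) {
        Ok(_) => candidate,
        Err(winner) => {
            // Lost the race: release this thread's allocation, use the winner's.
            unsafe { drop(Box::from_raw(candidate)) };
            winner
        }
    }
}
```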
1550
94
unsafe fn release_shared(ptr: *mut Shared) {
1551
    // `Shared` storage: follow the drop steps from `Arc`.
1552
94
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
1553
0
        return;
1554
94
    }
1555
1556
    // This fence is needed to prevent reordering of use of the data and
1557
    // deletion of the data. Because it is marked `Release`, the decrement
1558
    // of the reference count synchronizes with this `Acquire` fence. This
1559
    // means that use of the data happens before decreasing the reference
1560
    // count, which happens before this fence, which happens before the
1561
    // deletion of the data.
1562
    //
1563
    // As explained in the [Boost documentation][1],
1564
    //
1565
    // > It is important to enforce any possible access to the object in one
1566
    // > thread (through an existing reference) to *happen before* deleting
1567
    // > the object in a different thread. This is achieved by a "release"
1568
    // > operation after dropping a reference (any access to the object
1569
    // > through this reference must obviously happened before), and an
1570
    // > "acquire" operation before deleting the object.
1571
    //
1572
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1573
    //
1574
    // Thread sanitizer does not support atomic fences. Use an atomic load
1575
    // instead.
1576
94
    (*ptr).ref_cnt.load(Ordering::Acquire);
1577
1578
    // Drop the data
1579
94
    drop(Box::from_raw(ptr));
1580
94
}
1581
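For reference, the same release protocol written against a plain refcounted box, a sketch with a hypothetical `RefCounted` type (the real code above uses an `Acquire` load rather than a fence, as its comment notes, for ThreadSanitizer's benefit):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct RefCounted<T> {
    ref_cnt: AtomicUsize,
    value: T,
}

unsafe fn release<T>(ptr: *mut RefCounted<T>) {
    unsafe {
        // Release: all prior uses of `value` happen-before this decrement.
        if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
            return; // other owners remain
        }
        // Acquire: pair with every other owner's Release decrement before freeing.
        fence(Ordering::Acquire);
        drop(Box::from_raw(ptr));
    }
}
```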
1582
// Ideally we would always use this version of `ptr_map` since it is strict
1583
// provenance compatible, but it results in worse codegen. We will however still
1584
// use it on miri because it gives better diagnostics for people who test bytes
1585
// code with miri.
1586
//
1587
// See https://github.com/tokio-rs/bytes/pull/545 for more info.
1588
#[cfg(miri)]
1589
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1590
where
1591
    F: FnOnce(usize) -> usize,
1592
{
1593
    let old_addr = ptr as usize;
1594
    let new_addr = f(old_addr);
1595
    let diff = new_addr.wrapping_sub(old_addr);
1596
    ptr.wrapping_add(diff)
1597
}
1598
1599
#[cfg(not(miri))]
1600
664
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1601
664
where
1602
664
    F: FnOnce(usize) -> usize,
1603
{
1604
664
    let old_addr = ptr as usize;
1605
664
    let new_addr = f(old_addr);
1606
664
    new_addr as *mut u8
1607
664
}
bytes::bytes::ptr_map::<bytes::bytes::promotable_even_drop::{closure#0}::{closure#0}>
Line
Count
Source
1600
332
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1601
332
where
1602
332
    F: FnOnce(usize) -> usize,
1603
{
1604
332
    let old_addr = ptr as usize;
1605
332
    let new_addr = f(old_addr);
1606
332
    new_addr as *mut u8
1607
332
}
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_to_mut::{closure#0}::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_to_vec::{closure#0}::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_clone::{closure#0}>
bytes::bytes::ptr_map::<<bytes::bytes::Bytes as core::convert::From<alloc::boxed::Box<[u8]>>>::from::{closure#0}>
Line
Count
Source
1600
332
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1601
332
where
1602
332
    F: FnOnce(usize) -> usize,
1603
{
1604
332
    let old_addr = ptr as usize;
1605
332
    let new_addr = f(old_addr);
1606
332
    new_addr as *mut u8
1607
332
}
1608
1609
226
fn without_provenance(ptr: usize) -> *const u8 {
1610
226
    core::ptr::null::<u8>().wrapping_add(ptr)
1611
226
}
1612
1613
// compile-fails
1614
1615
/// ```compile_fail
1616
/// use bytes::Bytes;
1617
/// #[deny(unused_must_use)]
1618
/// {
1619
///     let mut b1 = Bytes::from("hello world");
1620
///     b1.split_to(6);
1621
/// }
1622
/// ```
1623
0
fn _split_to_must_use() {}
1624
1625
/// ```compile_fail
1626
/// use bytes::Bytes;
1627
/// #[deny(unused_must_use)]
1628
/// {
1629
///     let mut b1 = Bytes::from("hello world");
1630
///     b1.split_off(6);
1631
/// }
1632
/// ```
1633
0
fn _split_off_must_use() {}
1634
1635
// fuzz tests
1636
#[cfg(all(test, loom))]
1637
mod fuzz {
1638
    use loom::sync::Arc;
1639
    use loom::thread;
1640
1641
    use super::Bytes;
1642
    #[test]
1643
    fn bytes_cloning_vec() {
1644
        loom::model(|| {
1645
            let a = Bytes::from(b"abcdefgh".to_vec());
1646
            let addr = a.as_ptr() as usize;
1647
1648
            // Test that `Bytes::clone` is `Sync` by putting the value in an `Arc`.
1649
            let a1 = Arc::new(a);
1650
            let a2 = a1.clone();
1651
1652
            let t1 = thread::spawn(move || {
1653
                let b: Bytes = (*a1).clone();
1654
                assert_eq!(b.as_ptr() as usize, addr);
1655
            });
1656
1657
            let t2 = thread::spawn(move || {
1658
                let b: Bytes = (*a2).clone();
1659
                assert_eq!(b.as_ptr() as usize, addr);
1660
            });
1661
1662
            t1.join().unwrap();
1663
            t2.join().unwrap();
1664
        });
1665
    }
1666
}