/rust/registry/src/index.crates.io-6f17d22bba15001f/bytes-1.10.1/src/bytes.rs
Line | Count | Source |
1 | | use core::iter::FromIterator; |
2 | | use core::mem::{self, ManuallyDrop}; |
3 | | use core::ops::{Deref, RangeBounds}; |
4 | | use core::ptr::NonNull; |
5 | | use core::{cmp, fmt, hash, ptr, slice, usize}; |
6 | | |
7 | | use alloc::{ |
8 | | alloc::{dealloc, Layout}, |
9 | | borrow::Borrow, |
10 | | boxed::Box, |
11 | | string::String, |
12 | | vec::Vec, |
13 | | }; |
14 | | |
15 | | use crate::buf::IntoIter; |
16 | | #[allow(unused)] |
17 | | use crate::loom::sync::atomic::AtomicMut; |
18 | | use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; |
19 | | use crate::{offset_from, Buf, BytesMut}; |
20 | | |
21 | | /// A cheaply cloneable and sliceable chunk of contiguous memory. |
22 | | /// |
23 | | /// `Bytes` is an efficient container for storing and operating on contiguous |
24 | | /// slices of memory. It is intended for use primarily in networking code, but |
25 | | /// could have applications elsewhere as well. |
26 | | /// |
27 | | /// `Bytes` values facilitate zero-copy network programming by allowing multiple |
28 | | /// `Bytes` objects to point to the same underlying memory. |
29 | | /// |
30 | | /// `Bytes` does not have a single implementation. It is an interface, whose |
31 | | /// exact behavior is implemented through dynamic dispatch in several underlying |
32 | | /// implementations of `Bytes`. |
33 | | /// |
34 | | /// All `Bytes` implementations must fulfill the following requirements: |
35 | | /// - They are cheaply cloneable and thereby shareable between an unlimited number |
36 | | /// of components, for example by modifying a reference count. |
37 | | /// - Instances can be sliced to refer to a subset of the original buffer. |
38 | | /// |
39 | | /// ``` |
40 | | /// use bytes::Bytes; |
41 | | /// |
42 | | /// let mut mem = Bytes::from("Hello world"); |
43 | | /// let a = mem.slice(0..5); |
44 | | /// |
45 | | /// assert_eq!(a, "Hello"); |
46 | | /// |
47 | | /// let b = mem.split_to(6); |
48 | | /// |
49 | | /// assert_eq!(mem, "world"); |
50 | | /// assert_eq!(b, "Hello "); |
51 | | /// ``` |
52 | | /// |
53 | | /// # Memory layout |
54 | | /// |
55 | | /// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used |
56 | | /// to track information about which segment of the underlying memory the |
57 | | /// `Bytes` handle has access to. |
58 | | /// |
59 | | /// `Bytes` keeps both a pointer to the shared state containing the full memory |
60 | | /// slice and a pointer to the start of the region visible to the handle. |
61 | | /// `Bytes` also tracks the length of its view into the memory. |
62 | | /// |
63 | | /// # Sharing |
64 | | /// |
65 | | /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define |
66 | | /// how sharing/cloning is implemented in detail. |
67 | | /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for |
68 | | /// cloning the backing storage in order to share it behind multiple `Bytes` |
69 | | /// instances. |
70 | | /// |
71 | | /// For `Bytes` implementations which refer to constant memory (e.g. created |
72 | | /// via `Bytes::from_static()`) the cloning implementation will be a no-op. |
73 | | /// |
74 | | /// For `Bytes` implementations which point to a reference counted shared storage |
75 | | /// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the |
76 | | /// reference count. |
77 | | /// |
78 | | /// Due to this mechanism, multiple `Bytes` instances may point to the same |
79 | | /// shared memory region. |
80 | | /// Each `Bytes` instance can point to different sections within that |
81 | | /// memory region, and `Bytes` instances may or may not have overlapping views |
82 | | /// into the memory. |
83 | | /// |
84 | | /// The following diagram visualizes a scenario where 2 `Bytes` instances make |
85 | | /// use of an `Arc`-based backing storage, and provide access to different views: |
86 | | /// |
87 | | /// ```text |
88 | | /// |
89 | | /// Arc ptrs ┌─────────┐ |
90 | | /// ________________________ / │ Bytes 2 │ |
91 | | /// / └─────────┘ |
92 | | /// / ┌───────────┐ | | |
93 | | /// |_________/ │ Bytes 1 │ | | |
94 | | /// | └───────────┘ | | |
95 | | /// | | | ___/ data | tail |
96 | | /// | data | tail |/ | |
97 | | /// v v v v |
98 | | /// ┌─────┬─────┬───────────┬───────────────┬─────┐ |
99 | | /// │ Arc │ │ │ │ │ |
100 | | /// └─────┴─────┴───────────┴───────────────┴─────┘ |
101 | | /// ``` |
102 | | pub struct Bytes { |
103 | | ptr: *const u8, |
104 | | len: usize, |
105 | | // inlined "trait object" |
106 | | data: AtomicPtr<()>, |
107 | | vtable: &'static Vtable, |
108 | | } |
109 | | |
110 | | pub(crate) struct Vtable { |
111 | | /// fn(data, ptr, len) |
112 | | pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, |
113 | | /// fn(data, ptr, len) |
114 | | /// |
115 | | /// takes `Bytes` to value |
116 | | pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>, |
117 | | pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut, |
118 | | /// fn(data) |
119 | | pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool, |
120 | | /// fn(data, ptr, len) |
121 | | pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), |
122 | | } |
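
The sharing behaviour described in the doc comment above is observable from the public API alone. A minimal usage sketch (editorial illustration, not part of this file): clones and slices of a reference-counted `Bytes` all point into the same allocation.

```rust
use bytes::Bytes;

fn main() {
    // One heap allocation backs every handle below; clone() only adjusts the
    // shared state selected by the vtable, it never copies the bytes.
    let full = Bytes::from(b"hello world".to_vec());
    let whole = full.clone();
    let tail = full.slice(6..); // "world": same storage, different view

    // All views point into the same underlying buffer.
    assert_eq!(whole.as_ptr(), full.as_ptr());
    assert_eq!(tail.as_ptr(), full[6..].as_ptr());
    assert_eq!(tail, &b"world"[..]);
}
```
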
123 | | |
124 | | impl Bytes { |
125 | | /// Creates a new empty `Bytes`. |
126 | | /// |
127 | | /// This will not allocate and the returned `Bytes` handle will be empty. |
128 | | /// |
129 | | /// # Examples |
130 | | /// |
131 | | /// ``` |
132 | | /// use bytes::Bytes; |
133 | | /// |
134 | | /// let b = Bytes::new(); |
135 | | /// assert_eq!(&b[..], b""); |
136 | | /// ``` |
137 | | #[inline] |
138 | | #[cfg(not(all(loom, test)))] |
139 | 0 | pub const fn new() -> Self { |
140 | | // Make it a named const to work around |
141 | | // "unsizing casts are not allowed in const fn" |
142 | | const EMPTY: &[u8] = &[]; |
143 | 0 | Bytes::from_static(EMPTY) |
144 | 0 | } |
145 | | |
146 | | /// Creates a new empty `Bytes`. |
147 | | #[cfg(all(loom, test))] |
148 | | pub fn new() -> Self { |
149 | | const EMPTY: &[u8] = &[]; |
150 | | Bytes::from_static(EMPTY) |
151 | | } |
152 | | |
153 | | /// Creates a new `Bytes` from a static slice. |
154 | | /// |
155 | | /// The returned `Bytes` will point directly to the static slice. There is |
156 | | /// no allocating or copying. |
157 | | /// |
158 | | /// # Examples |
159 | | /// |
160 | | /// ``` |
161 | | /// use bytes::Bytes; |
162 | | /// |
163 | | /// let b = Bytes::from_static(b"hello"); |
164 | | /// assert_eq!(&b[..], b"hello"); |
165 | | /// ``` |
166 | | #[inline] |
167 | | #[cfg(not(all(loom, test)))] |
168 | 0 | pub const fn from_static(bytes: &'static [u8]) -> Self { |
169 | 0 | Bytes { |
170 | 0 | ptr: bytes.as_ptr(), |
171 | 0 | len: bytes.len(), |
172 | 0 | data: AtomicPtr::new(ptr::null_mut()), |
173 | 0 | vtable: &STATIC_VTABLE, |
174 | 0 | } |
175 | 0 | } |
176 | | |
177 | | /// Creates a new `Bytes` from a static slice. |
178 | | #[cfg(all(loom, test))] |
179 | | pub fn from_static(bytes: &'static [u8]) -> Self { |
180 | | Bytes { |
181 | | ptr: bytes.as_ptr(), |
182 | | len: bytes.len(), |
183 | | data: AtomicPtr::new(ptr::null_mut()), |
184 | | vtable: &STATIC_VTABLE, |
185 | | } |
186 | | } |
187 | | |
188 | | /// Creates a new `Bytes` with length zero and the given pointer as the address. |
189 | 0 | fn new_empty_with_ptr(ptr: *const u8) -> Self { |
190 | 0 | debug_assert!(!ptr.is_null()); |
191 | | |
192 | | // Detach this pointer's provenance from whichever allocation it came from, and reattach it |
193 | | // to the provenance of the fake ZST [u8;0] at the same address. |
194 | 0 | let ptr = without_provenance(ptr as usize); |
195 | 0 |
196 | 0 | Bytes { |
197 | 0 | ptr, |
198 | 0 | len: 0, |
199 | 0 | data: AtomicPtr::new(ptr::null_mut()), |
200 | 0 | vtable: &STATIC_VTABLE, |
201 | 0 | } |
202 | 0 | } |
203 | | |
204 | | /// Create [Bytes] with a buffer whose lifetime is controlled |
205 | | /// via an explicit owner. |
206 | | /// |
207 | | /// A common use case is to zero-copy construct from mapped memory. |
208 | | /// |
209 | | /// ``` |
210 | | /// # struct File; |
211 | | /// # |
212 | | /// # impl File { |
213 | | /// # pub fn open(_: &str) -> Result<Self, ()> { |
214 | | /// # Ok(Self) |
215 | | /// # } |
216 | | /// # } |
217 | | /// # |
218 | | /// # mod memmap2 { |
219 | | /// # pub struct Mmap; |
220 | | /// # |
221 | | /// # impl Mmap { |
222 | | /// # pub unsafe fn map(_file: &super::File) -> Result<Self, ()> { |
223 | | /// # Ok(Self) |
224 | | /// # } |
225 | | /// # } |
226 | | /// # |
227 | | /// # impl AsRef<[u8]> for Mmap { |
228 | | /// # fn as_ref(&self) -> &[u8] { |
229 | | /// # b"buf" |
230 | | /// # } |
231 | | /// # } |
232 | | /// # } |
233 | | /// use bytes::Bytes; |
234 | | /// use memmap2::Mmap; |
235 | | /// |
236 | | /// # fn main() -> Result<(), ()> { |
237 | | /// let file = File::open("upload_bundle.tar.gz")?; |
238 | | /// let mmap = unsafe { Mmap::map(&file) }?; |
239 | | /// let b = Bytes::from_owner(mmap); |
240 | | /// # Ok(()) |
241 | | /// # } |
242 | | /// ``` |
243 | | /// |
244 | | /// The `owner` will be transferred to the constructed [Bytes] object, which |
245 | | /// will ensure it is dropped once all remaining clones of the constructed |
246 | | /// object are dropped. The owner will then be responsible for dropping the |
247 | | /// specified region of memory as part of its [Drop] implementation. |
248 | | /// |
249 | | /// Note that converting [Bytes] constructed from an owner into a [BytesMut] |
250 | | /// will always create a deep copy of the buffer into newly allocated memory. |
251 | 0 | pub fn from_owner<T>(owner: T) -> Self |
252 | 0 | where |
253 | 0 | T: AsRef<[u8]> + Send + 'static, |
254 | 0 | { |
255 | 0 | // Safety & Miri: |
256 | 0 | // The ownership of `owner` is first transferred to the `Owned` wrapper and `Bytes` object. |
257 | 0 | // This ensures that the owner is pinned in memory, allowing us to call `.as_ref()` safely |
258 | 0 | // since the lifetime of the owner is controlled by the lifetime of the new `Bytes` object, |
259 | 0 | // and the lifetime of the resulting borrowed `&[u8]` matches that of the owner. |
260 | 0 | // Note that this remains safe so long as we only call `.as_ref()` once. |
261 | 0 | // |
262 | 0 | // There are some additional special considerations here: |
263 | 0 | // * We rely on Bytes's Drop impl to clean up memory should `.as_ref()` panic. |
264 | 0 | // * Setting the `ptr` and `len` on the bytes object last (after moving the owner to |
265 | 0 | // Bytes) allows Miri checks to pass since it avoids obtaining the `&[u8]` slice |
266 | 0 | // from a stack-owned Box. |
267 | 0 | // More details on this: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813375863 |
268 | 0 | // and: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813316032 |
269 | 0 |
270 | 0 | let owned = Box::into_raw(Box::new(Owned { |
271 | 0 | lifetime: OwnedLifetime { |
272 | 0 | ref_cnt: AtomicUsize::new(1), |
273 | 0 | drop: owned_box_and_drop::<T>, |
274 | 0 | }, |
275 | 0 | owner, |
276 | 0 | })); |
277 | 0 |
278 | 0 | let mut ret = Bytes { |
279 | 0 | ptr: NonNull::dangling().as_ptr(), |
280 | 0 | len: 0, |
281 | 0 | data: AtomicPtr::new(owned.cast()), |
282 | 0 | vtable: &OWNED_VTABLE, |
283 | 0 | }; |
284 | 0 |
285 | 0 | let buf = unsafe { &*owned }.owner.as_ref(); |
286 | 0 | ret.ptr = buf.as_ptr(); |
287 | 0 | ret.len = buf.len(); |
288 | 0 |
289 | 0 | ret |
290 | 0 | } |
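
As a supplement to the mmap example in the doc comment above, a sketch of the documented ownership behaviour: the owner is kept alive until the last clone is dropped. This is editorial illustration; `LoudOwner` is a made-up type, not part of the crate.

```rust
use bytes::Bytes;

// Hypothetical owner type used only for illustration.
struct LoudOwner(Vec<u8>);

impl AsRef<[u8]> for LoudOwner {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl Drop for LoudOwner {
    fn drop(&mut self) {
        println!("owner dropped");
    }
}

fn main() {
    let a = Bytes::from_owner(LoudOwner(b"payload".to_vec()));
    let b = a.clone();
    drop(a); // the owner is still alive: `b` refers to it
    assert_eq!(b, &b"payload"[..]);
    drop(b); // last handle gone, "owner dropped" is printed here
}
```
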
291 | | |
292 | | /// Returns the number of bytes contained in this `Bytes`. |
293 | | /// |
294 | | /// # Examples |
295 | | /// |
296 | | /// ``` |
297 | | /// use bytes::Bytes; |
298 | | /// |
299 | | /// let b = Bytes::from(&b"hello"[..]); |
300 | | /// assert_eq!(b.len(), 5); |
301 | | /// ``` |
302 | | #[inline] |
303 | 0 | pub const fn len(&self) -> usize { |
304 | 0 | self.len |
305 | 0 | } |
306 | | |
307 | | /// Returns true if the `Bytes` has a length of 0. |
308 | | /// |
309 | | /// # Examples |
310 | | /// |
311 | | /// ``` |
312 | | /// use bytes::Bytes; |
313 | | /// |
314 | | /// let b = Bytes::new(); |
315 | | /// assert!(b.is_empty()); |
316 | | /// ``` |
317 | | #[inline] |
318 | 0 | pub const fn is_empty(&self) -> bool { |
319 | 0 | self.len == 0 |
320 | 0 | } |
321 | | |
322 | | /// Returns true if this is the only reference to the data and |
323 | | /// `Into<BytesMut>` would avoid cloning the underlying buffer. |
324 | | /// |
325 | | /// Always returns false if the data is backed by a [static slice](Bytes::from_static), |
326 | | /// or an [owner](Bytes::from_owner). |
327 | | /// |
328 | | /// The result of this method may be invalidated immediately if another |
329 | | /// thread clones this value while this is being called. Ensure you have |
330 | | /// unique access to this value (`&mut Bytes`) first if you need to be |
331 | | /// certain the result is valid (i.e. for safety reasons). |
332 | | /// # Examples |
333 | | /// |
334 | | /// ``` |
335 | | /// use bytes::Bytes; |
336 | | /// |
337 | | /// let a = Bytes::from(vec![1, 2, 3]); |
338 | | /// assert!(a.is_unique()); |
339 | | /// let b = a.clone(); |
340 | | /// assert!(!a.is_unique()); |
341 | | /// ``` |
342 | 0 | pub fn is_unique(&self) -> bool { |
343 | 0 | unsafe { (self.vtable.is_unique)(&self.data) } |
344 | 0 | } |
345 | | |
346 | | /// Creates `Bytes` instance from slice, by copying it. |
347 | 0 | pub fn copy_from_slice(data: &[u8]) -> Self { |
348 | 0 | data.to_vec().into() |
349 | 0 | } |
350 | | |
351 | | /// Returns a slice of self for the provided range. |
352 | | /// |
353 | | /// This will increment the reference count for the underlying memory and |
354 | | /// return a new `Bytes` handle set to the slice. |
355 | | /// |
356 | | /// This operation is `O(1)`. |
357 | | /// |
358 | | /// # Examples |
359 | | /// |
360 | | /// ``` |
361 | | /// use bytes::Bytes; |
362 | | /// |
363 | | /// let a = Bytes::from(&b"hello world"[..]); |
364 | | /// let b = a.slice(2..5); |
365 | | /// |
366 | | /// assert_eq!(&b[..], b"llo"); |
367 | | /// ``` |
368 | | /// |
369 | | /// # Panics |
370 | | /// |
371 | | /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing |
372 | | /// will panic. |
373 | 0 | pub fn slice(&self, range: impl RangeBounds<usize>) -> Self { |
374 | | use core::ops::Bound; |
375 | | |
376 | 0 | let len = self.len(); |
377 | | |
378 | 0 | let begin = match range.start_bound() { |
379 | 0 | Bound::Included(&n) => n, |
380 | 0 | Bound::Excluded(&n) => n.checked_add(1).expect("out of range"), |
381 | 0 | Bound::Unbounded => 0, |
382 | | }; |
383 | | |
384 | 0 | let end = match range.end_bound() { |
385 | 0 | Bound::Included(&n) => n.checked_add(1).expect("out of range"), |
386 | 0 | Bound::Excluded(&n) => n, |
387 | 0 | Bound::Unbounded => len, |
388 | | }; |
389 | | |
390 | 0 | assert!( |
391 | 0 | begin <= end, |
392 | 0 | "range start must not be greater than end: {:?} <= {:?}", |
393 | | begin, |
394 | | end, |
395 | | ); |
396 | 0 | assert!( |
397 | 0 | end <= len, |
398 | 0 | "range end out of bounds: {:?} <= {:?}", |
399 | | end, |
400 | | len, |
401 | | ); |
402 | | |
403 | 0 | if end == begin { |
404 | 0 | return Bytes::new(); |
405 | 0 | } |
406 | 0 |
407 | 0 | let mut ret = self.clone(); |
408 | 0 |
409 | 0 | ret.len = end - begin; |
410 | 0 | ret.ptr = unsafe { ret.ptr.add(begin) }; |
411 | 0 |
412 | 0 | ret |
413 | 0 | } |
414 | | |
415 | | /// Returns a slice of self that is equivalent to the given `subset`. |
416 | | /// |
417 | | /// When processing a `Bytes` buffer with other tools, one often gets a |
418 | | /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. |
419 | | /// This function turns that `&[u8]` into another `Bytes`, as if one had |
420 | | /// called `self.slice()` with the offsets that correspond to `subset`. |
421 | | /// |
422 | | /// This operation is `O(1)`. |
423 | | /// |
424 | | /// # Examples |
425 | | /// |
426 | | /// ``` |
427 | | /// use bytes::Bytes; |
428 | | /// |
429 | | /// let bytes = Bytes::from(&b"012345678"[..]); |
430 | | /// let as_slice = bytes.as_ref(); |
431 | | /// let subset = &as_slice[2..6]; |
432 | | /// let subslice = bytes.slice_ref(&subset); |
433 | | /// assert_eq!(&subslice[..], b"2345"); |
434 | | /// ``` |
435 | | /// |
436 | | /// # Panics |
437 | | /// |
438 | | /// Requires that the given `sub` slice is in fact contained within the |
439 | | /// `Bytes` buffer; otherwise this function will panic. |
440 | 0 | pub fn slice_ref(&self, subset: &[u8]) -> Self { |
441 | 0 | // Empty slice and empty Bytes may have their pointers reset |
442 | 0 | // so explicitly allow empty slice to be a subslice of any slice. |
443 | 0 | if subset.is_empty() { |
444 | 0 | return Bytes::new(); |
445 | 0 | } |
446 | 0 |
447 | 0 | let bytes_p = self.as_ptr() as usize; |
448 | 0 | let bytes_len = self.len(); |
449 | 0 |
450 | 0 | let sub_p = subset.as_ptr() as usize; |
451 | 0 | let sub_len = subset.len(); |
452 | 0 |
453 | 0 | assert!( |
454 | 0 | sub_p >= bytes_p, |
455 | 0 | "subset pointer ({:p}) is smaller than self pointer ({:p})", |
456 | 0 | subset.as_ptr(), |
457 | 0 | self.as_ptr(), |
458 | | ); |
459 | 0 | assert!( |
460 | 0 | sub_p + sub_len <= bytes_p + bytes_len, |
461 | 0 | "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", |
462 | 0 | self.as_ptr(), |
463 | 0 | bytes_len, |
464 | 0 | subset.as_ptr(), |
465 | | sub_len, |
466 | | ); |
467 | | |
468 | 0 | let sub_offset = sub_p - bytes_p; |
469 | 0 |
470 | 0 | self.slice(sub_offset..(sub_offset + sub_len)) |
471 | 0 | } |
472 | | |
473 | | /// Splits the bytes into two at the given index. |
474 | | /// |
475 | | /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` |
476 | | /// contains elements `[at, len)`. It's guaranteed that the memory does not |
477 | | /// move, that is, the address of `self` does not change, and the address of |
478 | | /// the returned slice is `at` bytes after that. |
479 | | /// |
480 | | /// This is an `O(1)` operation that just increases the reference count and |
481 | | /// sets a few indices. |
482 | | /// |
483 | | /// # Examples |
484 | | /// |
485 | | /// ``` |
486 | | /// use bytes::Bytes; |
487 | | /// |
488 | | /// let mut a = Bytes::from(&b"hello world"[..]); |
489 | | /// let b = a.split_off(5); |
490 | | /// |
491 | | /// assert_eq!(&a[..], b"hello"); |
492 | | /// assert_eq!(&b[..], b" world"); |
493 | | /// ``` |
494 | | /// |
495 | | /// # Panics |
496 | | /// |
497 | | /// Panics if `at > len`. |
498 | | #[must_use = "consider Bytes::truncate if you don't need the other half"] |
499 | 0 | pub fn split_off(&mut self, at: usize) -> Self { |
500 | 0 | if at == self.len() { |
501 | 0 | return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at)); |
502 | 0 | } |
503 | 0 |
504 | 0 | if at == 0 { |
505 | 0 | return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr)); |
506 | 0 | } |
507 | 0 |
508 | 0 | assert!( |
509 | 0 | at <= self.len(), |
510 | 0 | "split_off out of bounds: {:?} <= {:?}", |
511 | 0 | at, |
512 | 0 | self.len(), |
513 | | ); |
514 | | |
515 | 0 | let mut ret = self.clone(); |
516 | 0 |
517 | 0 | self.len = at; |
518 | 0 |
519 | 0 | unsafe { ret.inc_start(at) }; |
520 | 0 |
521 | 0 | ret |
522 | 0 | } |
523 | | |
524 | | /// Splits the bytes into two at the given index. |
525 | | /// |
526 | | /// Afterwards `self` contains elements `[at, len)`, and the returned |
527 | | /// `Bytes` contains elements `[0, at)`. |
528 | | /// |
529 | | /// This is an `O(1)` operation that just increases the reference count and |
530 | | /// sets a few indices. |
531 | | /// |
532 | | /// # Examples |
533 | | /// |
534 | | /// ``` |
535 | | /// use bytes::Bytes; |
536 | | /// |
537 | | /// let mut a = Bytes::from(&b"hello world"[..]); |
538 | | /// let b = a.split_to(5); |
539 | | /// |
540 | | /// assert_eq!(&a[..], b" world"); |
541 | | /// assert_eq!(&b[..], b"hello"); |
542 | | /// ``` |
543 | | /// |
544 | | /// # Panics |
545 | | /// |
546 | | /// Panics if `at > len`. |
547 | | #[must_use = "consider Bytes::advance if you don't need the other half"] |
548 | 0 | pub fn split_to(&mut self, at: usize) -> Self { |
549 | 0 | if at == self.len() { |
550 | 0 | let end_ptr = self.ptr.wrapping_add(at); |
551 | 0 | return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr)); |
552 | 0 | } |
553 | 0 |
554 | 0 | if at == 0 { |
555 | 0 | return Bytes::new_empty_with_ptr(self.ptr); |
556 | 0 | } |
557 | 0 |
558 | 0 | assert!( |
559 | 0 | at <= self.len(), |
560 | 0 | "split_to out of bounds: {:?} <= {:?}", |
561 | 0 | at, |
562 | 0 | self.len(), |
563 | | ); |
564 | | |
565 | 0 | let mut ret = self.clone(); |
566 | 0 |
567 | 0 | unsafe { self.inc_start(at) }; |
568 | 0 |
569 | 0 | ret.len = at; |
570 | 0 | ret |
571 | 0 | } |
572 | | |
573 | | /// Shortens the buffer, keeping the first `len` bytes and dropping the |
574 | | /// rest. |
575 | | /// |
576 | | /// If `len` is greater than the buffer's current length, this has no |
577 | | /// effect. |
578 | | /// |
579 | | /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the |
580 | | /// excess bytes to be returned instead of dropped. |
581 | | /// |
582 | | /// # Examples |
583 | | /// |
584 | | /// ``` |
585 | | /// use bytes::Bytes; |
586 | | /// |
587 | | /// let mut buf = Bytes::from(&b"hello world"[..]); |
588 | | /// buf.truncate(5); |
589 | | /// assert_eq!(buf, b"hello"[..]); |
590 | | /// ``` |
591 | | #[inline] |
592 | 0 | pub fn truncate(&mut self, len: usize) { |
593 | 0 | if len < self.len { |
594 | | // The Vec "promotable" vtables do not store the capacity, |
595 | | // so we cannot truncate while using this repr. We *have* to |
596 | | // promote using `split_off` so the capacity can be stored. |
597 | 0 | if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE |
598 | 0 | || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE |
599 | 0 | { |
600 | 0 | drop(self.split_off(len)); |
601 | 0 | } else { |
602 | 0 | self.len = len; |
603 | 0 | } |
604 | 0 | } |
605 | 0 | } |
606 | | |
607 | | /// Clears the buffer, removing all data. |
608 | | /// |
609 | | /// # Examples |
610 | | /// |
611 | | /// ``` |
612 | | /// use bytes::Bytes; |
613 | | /// |
614 | | /// let mut buf = Bytes::from(&b"hello world"[..]); |
615 | | /// buf.clear(); |
616 | | /// assert!(buf.is_empty()); |
617 | | /// ``` |
618 | | #[inline] |
619 | 0 | pub fn clear(&mut self) { |
620 | 0 | self.truncate(0); |
621 | 0 | } |
622 | | |
623 | | /// Try to convert self into `BytesMut`. |
624 | | /// |
625 | | /// If `self` is unique for the entire original buffer, this will succeed |
626 | | /// and return a `BytesMut` with the contents of `self` without copying. |
627 | | /// If `self` is not unique for the entire original buffer, this will fail |
628 | | /// and return self. |
629 | | /// |
630 | | /// This will also always fail if the buffer was constructed via either |
631 | | /// [from_owner](Bytes::from_owner) or [from_static](Bytes::from_static). |
632 | | /// |
633 | | /// # Examples |
634 | | /// |
635 | | /// ``` |
636 | | /// use bytes::{Bytes, BytesMut}; |
637 | | /// |
638 | | /// let bytes = Bytes::from(b"hello".to_vec()); |
639 | | /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..]))); |
640 | | /// ``` |
641 | 0 | pub fn try_into_mut(self) -> Result<BytesMut, Bytes> { |
642 | 0 | if self.is_unique() { |
643 | 0 | Ok(self.into()) |
644 | | } else { |
645 | 0 | Err(self) |
646 | | } |
647 | 0 | } |
648 | | |
649 | | #[inline] |
650 | 0 | pub(crate) unsafe fn with_vtable( |
651 | 0 | ptr: *const u8, |
652 | 0 | len: usize, |
653 | 0 | data: AtomicPtr<()>, |
654 | 0 | vtable: &'static Vtable, |
655 | 0 | ) -> Bytes { |
656 | 0 | Bytes { |
657 | 0 | ptr, |
658 | 0 | len, |
659 | 0 | data, |
660 | 0 | vtable, |
661 | 0 | } |
662 | 0 | } |
663 | | |
664 | | // private |
665 | | |
666 | | #[inline] |
667 | 0 | fn as_slice(&self) -> &[u8] { |
668 | 0 | unsafe { slice::from_raw_parts(self.ptr, self.len) } |
669 | 0 | } |
670 | | |
671 | | #[inline] |
672 | 0 | unsafe fn inc_start(&mut self, by: usize) { |
673 | 0 | // should already be asserted, but debug assert for tests |
674 | 0 | debug_assert!(self.len >= by, "internal: inc_start out of bounds"); |
675 | 0 | self.len -= by; |
676 | 0 | self.ptr = self.ptr.add(by); |
677 | 0 | } |
678 | | } |
679 | | |
680 | | // Vtable must enforce this behavior |
681 | | unsafe impl Send for Bytes {} |
682 | | unsafe impl Sync for Bytes {} |
683 | | |
684 | | impl Drop for Bytes { |
685 | | #[inline] |
686 | 0 | fn drop(&mut self) { |
687 | 0 | unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) } |
688 | 0 | } |
689 | | } |
690 | | |
691 | | impl Clone for Bytes { |
692 | | #[inline] |
693 | 0 | fn clone(&self) -> Bytes { |
694 | 0 | unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) } |
695 | 0 | } |
696 | | } |
697 | | |
698 | | impl Buf for Bytes { |
699 | | #[inline] |
700 | 0 | fn remaining(&self) -> usize { |
701 | 0 | self.len() |
702 | 0 | } |
703 | | |
704 | | #[inline] |
705 | 0 | fn chunk(&self) -> &[u8] { |
706 | 0 | self.as_slice() |
707 | 0 | } |
708 | | |
709 | | #[inline] |
710 | 0 | fn advance(&mut self, cnt: usize) { |
711 | 0 | assert!( |
712 | 0 | cnt <= self.len(), |
713 | 0 | "cannot advance past `remaining`: {:?} <= {:?}", |
714 | 0 | cnt, |
715 | 0 | self.len(), |
716 | | ); |
717 | | |
718 | 0 | unsafe { |
719 | 0 | self.inc_start(cnt); |
720 | 0 | } |
721 | 0 | } |
722 | | |
723 | 0 | fn copy_to_bytes(&mut self, len: usize) -> Self { |
724 | 0 | self.split_to(len) |
725 | 0 | } |
726 | | } |
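
A short usage sketch (editorial, not part of this file) of the `Buf` implementation above: `advance` narrows the view in place, and `copy_to_bytes` delegates to the zero-copy `split_to`.

```rust
use bytes::{Buf, Bytes};

fn main() {
    let mut buf = Bytes::from_static(b"\x00\x2a tail");
    assert_eq!(buf.remaining(), 7);

    // get_u16 (a Buf provided method) reads big-endian and advances by 2.
    assert_eq!(buf.get_u16(), 42);
    assert_eq!(buf.remaining(), 5);

    // copy_to_bytes on Bytes is zero-copy: it is split_to under the hood.
    let rest = buf.copy_to_bytes(buf.remaining());
    assert_eq!(rest, &b" tail"[..]);
    assert!(!buf.has_remaining());
}
```
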
727 | | |
728 | | impl Deref for Bytes { |
729 | | type Target = [u8]; |
730 | | |
731 | | #[inline] |
732 | 0 | fn deref(&self) -> &[u8] { |
733 | 0 | self.as_slice() |
734 | 0 | } |
735 | | } |
736 | | |
737 | | impl AsRef<[u8]> for Bytes { |
738 | | #[inline] |
739 | 0 | fn as_ref(&self) -> &[u8] { |
740 | 0 | self.as_slice() |
741 | 0 | } |
742 | | } |
743 | | |
744 | | impl hash::Hash for Bytes { |
745 | 0 | fn hash<H>(&self, state: &mut H) |
746 | 0 | where |
747 | 0 | H: hash::Hasher, |
748 | 0 | { |
749 | 0 | self.as_slice().hash(state); |
750 | 0 | } |
751 | | } |
752 | | |
753 | | impl Borrow<[u8]> for Bytes { |
754 | 0 | fn borrow(&self) -> &[u8] { |
755 | 0 | self.as_slice() |
756 | 0 | } |
757 | | } |
758 | | |
759 | | impl IntoIterator for Bytes { |
760 | | type Item = u8; |
761 | | type IntoIter = IntoIter<Bytes>; |
762 | | |
763 | 0 | fn into_iter(self) -> Self::IntoIter { |
764 | 0 | IntoIter::new(self) |
765 | 0 | } |
766 | | } |
767 | | |
768 | | impl<'a> IntoIterator for &'a Bytes { |
769 | | type Item = &'a u8; |
770 | | type IntoIter = core::slice::Iter<'a, u8>; |
771 | | |
772 | 0 | fn into_iter(self) -> Self::IntoIter { |
773 | 0 | self.as_slice().iter() |
774 | 0 | } |
775 | | } |
776 | | |
777 | | impl FromIterator<u8> for Bytes { |
778 | 0 | fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { |
779 | 0 | Vec::from_iter(into_iter).into() |
780 | 0 | } |
781 | | } |
782 | | |
783 | | // impl Eq |
784 | | |
785 | | impl PartialEq for Bytes { |
786 | 0 | fn eq(&self, other: &Bytes) -> bool { |
787 | 0 | self.as_slice() == other.as_slice() |
788 | 0 | } |
789 | | } |
790 | | |
791 | | impl PartialOrd for Bytes { |
792 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
793 | 0 | self.as_slice().partial_cmp(other.as_slice()) |
794 | 0 | } |
795 | | } |
796 | | |
797 | | impl Ord for Bytes { |
798 | 0 | fn cmp(&self, other: &Bytes) -> cmp::Ordering { |
799 | 0 | self.as_slice().cmp(other.as_slice()) |
800 | 0 | } |
801 | | } |
802 | | |
803 | | impl Eq for Bytes {} |
804 | | |
805 | | impl PartialEq<[u8]> for Bytes { |
806 | 0 | fn eq(&self, other: &[u8]) -> bool { |
807 | 0 | self.as_slice() == other |
808 | 0 | } |
809 | | } |
810 | | |
811 | | impl PartialOrd<[u8]> for Bytes { |
812 | 0 | fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { |
813 | 0 | self.as_slice().partial_cmp(other) |
814 | 0 | } |
815 | | } |
816 | | |
817 | | impl PartialEq<Bytes> for [u8] { |
818 | 0 | fn eq(&self, other: &Bytes) -> bool { |
819 | 0 | *other == *self |
820 | 0 | } |
821 | | } |
822 | | |
823 | | impl PartialOrd<Bytes> for [u8] { |
824 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
825 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) |
826 | 0 | } |
827 | | } |
828 | | |
829 | | impl PartialEq<str> for Bytes { |
830 | 0 | fn eq(&self, other: &str) -> bool { |
831 | 0 | self.as_slice() == other.as_bytes() |
832 | 0 | } |
833 | | } |
834 | | |
835 | | impl PartialOrd<str> for Bytes { |
836 | 0 | fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { |
837 | 0 | self.as_slice().partial_cmp(other.as_bytes()) |
838 | 0 | } |
839 | | } |
840 | | |
841 | | impl PartialEq<Bytes> for str { |
842 | 0 | fn eq(&self, other: &Bytes) -> bool { |
843 | 0 | *other == *self |
844 | 0 | } |
845 | | } |
846 | | |
847 | | impl PartialOrd<Bytes> for str { |
848 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
849 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) |
850 | 0 | } |
851 | | } |
852 | | |
853 | | impl PartialEq<Vec<u8>> for Bytes { |
854 | 0 | fn eq(&self, other: &Vec<u8>) -> bool { |
855 | 0 | *self == other[..] |
856 | 0 | } |
857 | | } |
858 | | |
859 | | impl PartialOrd<Vec<u8>> for Bytes { |
860 | 0 | fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { |
861 | 0 | self.as_slice().partial_cmp(&other[..]) |
862 | 0 | } |
863 | | } |
864 | | |
865 | | impl PartialEq<Bytes> for Vec<u8> { |
866 | 0 | fn eq(&self, other: &Bytes) -> bool { |
867 | 0 | *other == *self |
868 | 0 | } |
869 | | } |
870 | | |
871 | | impl PartialOrd<Bytes> for Vec<u8> { |
872 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
873 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) |
874 | 0 | } |
875 | | } |
876 | | |
877 | | impl PartialEq<String> for Bytes { |
878 | 0 | fn eq(&self, other: &String) -> bool { |
879 | 0 | *self == other[..] |
880 | 0 | } |
881 | | } |
882 | | |
883 | | impl PartialOrd<String> for Bytes { |
884 | 0 | fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { |
885 | 0 | self.as_slice().partial_cmp(other.as_bytes()) |
886 | 0 | } |
887 | | } |
888 | | |
889 | | impl PartialEq<Bytes> for String { |
890 | 0 | fn eq(&self, other: &Bytes) -> bool { |
891 | 0 | *other == *self |
892 | 0 | } |
893 | | } |
894 | | |
895 | | impl PartialOrd<Bytes> for String { |
896 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
897 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) |
898 | 0 | } |
899 | | } |
900 | | |
901 | | impl PartialEq<Bytes> for &[u8] { |
902 | 0 | fn eq(&self, other: &Bytes) -> bool { |
903 | 0 | *other == *self |
904 | 0 | } |
905 | | } |
906 | | |
907 | | impl PartialOrd<Bytes> for &[u8] { |
908 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
909 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other) |
910 | 0 | } |
911 | | } |
912 | | |
913 | | impl PartialEq<Bytes> for &str { |
914 | 0 | fn eq(&self, other: &Bytes) -> bool { |
915 | 0 | *other == *self |
916 | 0 | } |
917 | | } |
918 | | |
919 | | impl PartialOrd<Bytes> for &str { |
920 | 0 | fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { |
921 | 0 | <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other) |
922 | 0 | } |
923 | | } |
924 | | |
925 | | impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes |
926 | | where |
927 | | Bytes: PartialEq<T>, |
928 | | { |
929 | 0 | fn eq(&self, other: &&'a T) -> bool { |
930 | 0 | *self == **other |
931 | 0 | } |
Unexecuted instantiation: <bytes::bytes::Bytes as core::cmp::PartialEq<&[u8]>>::eq
Unexecuted instantiation: <bytes::bytes::Bytes as core::cmp::PartialEq<&str>>::eq
932 | | } |
933 | | |
934 | | impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes |
935 | | where |
936 | | Bytes: PartialOrd<T>, |
937 | | { |
938 | 0 | fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { |
939 | 0 | self.partial_cmp(&**other) |
940 | 0 | } |
941 | | } |
942 | | |
943 | | // impl From |
944 | | |
945 | | impl Default for Bytes { |
946 | | #[inline] |
947 | 0 | fn default() -> Bytes { |
948 | 0 | Bytes::new() |
949 | 0 | } |
950 | | } |
951 | | |
952 | | impl From<&'static [u8]> for Bytes { |
953 | 0 | fn from(slice: &'static [u8]) -> Bytes { |
954 | 0 | Bytes::from_static(slice) |
955 | 0 | } |
956 | | } |
957 | | |
958 | | impl From<&'static str> for Bytes { |
959 | 0 | fn from(slice: &'static str) -> Bytes { |
960 | 0 | Bytes::from_static(slice.as_bytes()) |
961 | 0 | } |
962 | | } |
963 | | |
964 | | impl From<Vec<u8>> for Bytes { |
965 | 0 | fn from(vec: Vec<u8>) -> Bytes { |
966 | 0 | let mut vec = ManuallyDrop::new(vec); |
967 | 0 | let ptr = vec.as_mut_ptr(); |
968 | 0 | let len = vec.len(); |
969 | 0 | let cap = vec.capacity(); |
970 | 0 |
971 | 0 | // Avoid an extra allocation if possible. |
972 | 0 | if len == cap { |
973 | 0 | let vec = ManuallyDrop::into_inner(vec); |
974 | 0 | return Bytes::from(vec.into_boxed_slice()); |
975 | 0 | } |
976 | 0 |
977 | 0 | let shared = Box::new(Shared { |
978 | 0 | buf: ptr, |
979 | 0 | cap, |
980 | 0 | ref_cnt: AtomicUsize::new(1), |
981 | 0 | }); |
982 | 0 |
983 | 0 | let shared = Box::into_raw(shared); |
984 | 0 | // The pointer should be aligned, so this assert should |
985 | 0 | // always succeed. |
986 | 0 | debug_assert!( |
987 | 0 | 0 == (shared as usize & KIND_MASK), |
988 | | "internal: Box<Shared> should have an aligned pointer", |
989 | | ); |
990 | 0 | Bytes { |
991 | 0 | ptr, |
992 | 0 | len, |
993 | 0 | data: AtomicPtr::new(shared as _), |
994 | 0 | vtable: &SHARED_VTABLE, |
995 | 0 | } |
996 | 0 | } |
997 | | } |
998 | | |
999 | | impl From<Box<[u8]>> for Bytes { |
1000 | 0 | fn from(slice: Box<[u8]>) -> Bytes { |
1001 | 0 | // Box<[u8]> doesn't contain a heap allocation for empty slices, |
1002 | 0 | // so the pointer isn't aligned enough for the KIND_VEC stashing to |
1003 | 0 | // work. |
1004 | 0 | if slice.is_empty() { |
1005 | 0 | return Bytes::new(); |
1006 | 0 | } |
1007 | 0 |
1008 | 0 | let len = slice.len(); |
1009 | 0 | let ptr = Box::into_raw(slice) as *mut u8; |
1010 | 0 |
1011 | 0 | if ptr as usize & 0x1 == 0 { |
1012 | 0 | let data = ptr_map(ptr, |addr| addr | KIND_VEC); |
1013 | 0 | Bytes { |
1014 | 0 | ptr, |
1015 | 0 | len, |
1016 | 0 | data: AtomicPtr::new(data.cast()), |
1017 | 0 | vtable: &PROMOTABLE_EVEN_VTABLE, |
1018 | 0 | } |
1019 | | } else { |
1020 | 0 | Bytes { |
1021 | 0 | ptr, |
1022 | 0 | len, |
1023 | 0 | data: AtomicPtr::new(ptr.cast()), |
1024 | 0 | vtable: &PROMOTABLE_ODD_VTABLE, |
1025 | 0 | } |
1026 | | } |
1027 | 0 | } |
1028 | | } |
1029 | | |
1030 | | impl From<Bytes> for BytesMut { |
1031 | | /// Convert self into `BytesMut`. |
1032 | | /// |
1033 | | /// If `bytes` is unique for the entire original buffer, this will return a |
1034 | | /// `BytesMut` with the contents of `bytes` without copying. |
1035 | | /// If `bytes` is not unique for the entire original buffer, this will make |
1036 | | /// a copy of `bytes`' subset of the original buffer in a new `BytesMut`. |
1037 | | /// |
1038 | | /// # Examples |
1039 | | /// |
1040 | | /// ``` |
1041 | | /// use bytes::{Bytes, BytesMut}; |
1042 | | /// |
1043 | | /// let bytes = Bytes::from(b"hello".to_vec()); |
1044 | | /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..])); |
1045 | | /// ``` |
1046 | 0 | fn from(bytes: Bytes) -> Self { |
1047 | 0 | let bytes = ManuallyDrop::new(bytes); |
1048 | 0 | unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) } |
1049 | 0 | } |
1050 | | } |
1051 | | |
1052 | | impl From<String> for Bytes { |
1053 | 0 | fn from(s: String) -> Bytes { |
1054 | 0 | Bytes::from(s.into_bytes()) |
1055 | 0 | } |
1056 | | } |
1057 | | |
1058 | | impl From<Bytes> for Vec<u8> { |
1059 | 0 | fn from(bytes: Bytes) -> Vec<u8> { |
1060 | 0 | let bytes = ManuallyDrop::new(bytes); |
1061 | 0 | unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } |
1062 | 0 | } |
1063 | | } |
1064 | | |
1065 | | // ===== impl Vtable ===== |
1066 | | |
1067 | | impl fmt::Debug for Vtable { |
1068 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1069 | 0 | f.debug_struct("Vtable") |
1070 | 0 | .field("clone", &(self.clone as *const ())) |
1071 | 0 | .field("drop", &(self.drop as *const ())) |
1072 | 0 | .finish() |
1073 | 0 | } |
1074 | | } |
1075 | | |
1076 | | // ===== impl StaticVtable ===== |
1077 | | |
1078 | | const STATIC_VTABLE: Vtable = Vtable { |
1079 | | clone: static_clone, |
1080 | | to_vec: static_to_vec, |
1081 | | to_mut: static_to_mut, |
1082 | | is_unique: static_is_unique, |
1083 | | drop: static_drop, |
1084 | | }; |
1085 | | |
1086 | 0 | unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1087 | 0 | let slice = slice::from_raw_parts(ptr, len); |
1088 | 0 | Bytes::from_static(slice) |
1089 | 0 | } |
1090 | | |
1091 | 0 | unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1092 | 0 | let slice = slice::from_raw_parts(ptr, len); |
1093 | 0 | slice.to_vec() |
1094 | 0 | } |
1095 | | |
1096 | 0 | unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1097 | 0 | let slice = slice::from_raw_parts(ptr, len); |
1098 | 0 | BytesMut::from(slice) |
1099 | 0 | } |
1100 | | |
1101 | 0 | fn static_is_unique(_: &AtomicPtr<()>) -> bool { |
1102 | 0 | false |
1103 | 0 | } |
1104 | | |
1105 | 0 | unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { |
1106 | 0 | // nothing to drop for &'static [u8] |
1107 | 0 | } |
1108 | | |
1109 | | // ===== impl OwnedVtable ===== |
1110 | | |
1111 | | #[repr(C)] |
1112 | | struct OwnedLifetime { |
1113 | | ref_cnt: AtomicUsize, |
1114 | | drop: unsafe fn(*mut ()), |
1115 | | } |
1116 | | |
1117 | | #[repr(C)] |
1118 | | struct Owned<T> { |
1119 | | lifetime: OwnedLifetime, |
1120 | | owner: T, |
1121 | | } |
1122 | | |
1123 | 0 | unsafe fn owned_box_and_drop<T>(ptr: *mut ()) { |
1124 | 0 | let b: Box<Owned<T>> = Box::from_raw(ptr as _); |
1125 | 0 | drop(b); |
1126 | 0 | } |
1127 | | |
1128 | 0 | unsafe fn owned_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1129 | 0 | let owned = data.load(Ordering::Relaxed); |
1130 | 0 | let ref_cnt = &(*owned.cast::<OwnedLifetime>()).ref_cnt; |
1131 | 0 | let old_cnt = ref_cnt.fetch_add(1, Ordering::Relaxed); |
1132 | 0 | if old_cnt > usize::MAX >> 1 { |
1133 | 0 | crate::abort() |
1134 | 0 | } |
1135 | 0 |
1136 | 0 | Bytes { |
1137 | 0 | ptr, |
1138 | 0 | len, |
1139 | 0 | data: AtomicPtr::new(owned as _), |
1140 | 0 | vtable: &OWNED_VTABLE, |
1141 | 0 | } |
1142 | 0 | } |
1143 | | |
1144 | 0 | unsafe fn owned_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1145 | 0 | let slice = slice::from_raw_parts(ptr, len); |
1146 | 0 | let vec = slice.to_vec(); |
1147 | 0 | owned_drop_impl(data.load(Ordering::Relaxed)); |
1148 | 0 | vec |
1149 | 0 | } |
1150 | | |
1151 | 0 | unsafe fn owned_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1152 | 0 | BytesMut::from_vec(owned_to_vec(data, ptr, len)) |
1153 | 0 | } |
1154 | | |
1155 | 0 | unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool { |
1156 | 0 | false |
1157 | 0 | } |
1158 | | |
1159 | 0 | unsafe fn owned_drop_impl(owned: *mut ()) { |
1160 | 0 | let lifetime = owned.cast::<OwnedLifetime>(); |
1161 | 0 | let ref_cnt = &(*lifetime).ref_cnt; |
1162 | 0 |
1163 | 0 | let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release); |
1164 | 0 | debug_assert!( |
1165 | 0 | old_cnt > 0 && old_cnt <= usize::MAX >> 1, |
1166 | | "expected non-zero refcount and no underflow" |
1167 | | ); |
1168 | 0 | if old_cnt != 1 { |
1169 | 0 | return; |
1170 | 0 | } |
1171 | 0 | ref_cnt.load(Ordering::Acquire); |
1172 | 0 |
1173 | 0 | let drop_fn = &(*lifetime).drop; |
1174 | 0 | drop_fn(owned) |
1175 | 0 | } |
1176 | | |
1177 | 0 | unsafe fn owned_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { |
1178 | 0 | let owned = data.load(Ordering::Relaxed); |
1179 | 0 | owned_drop_impl(owned); |
1180 | 0 | } |
1181 | | |
1182 | | static OWNED_VTABLE: Vtable = Vtable { |
1183 | | clone: owned_clone, |
1184 | | to_vec: owned_to_vec, |
1185 | | to_mut: owned_to_mut, |
1186 | | is_unique: owned_is_unique, |
1187 | | drop: owned_drop, |
1188 | | }; |
1189 | | |
1190 | | // ===== impl PromotableVtable ===== |
1191 | | |
1192 | | static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { |
1193 | | clone: promotable_even_clone, |
1194 | | to_vec: promotable_even_to_vec, |
1195 | | to_mut: promotable_even_to_mut, |
1196 | | is_unique: promotable_is_unique, |
1197 | | drop: promotable_even_drop, |
1198 | | }; |
1199 | | |
1200 | | static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { |
1201 | | clone: promotable_odd_clone, |
1202 | | to_vec: promotable_odd_to_vec, |
1203 | | to_mut: promotable_odd_to_mut, |
1204 | | is_unique: promotable_is_unique, |
1205 | | drop: promotable_odd_drop, |
1206 | | }; |
1207 | | |
1208 | 0 | unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1209 | 0 | let shared = data.load(Ordering::Acquire); |
1210 | 0 | let kind = shared as usize & KIND_MASK; |
1211 | 0 |
1212 | 0 | if kind == KIND_ARC { |
1213 | 0 | shallow_clone_arc(shared.cast(), ptr, len) |
1214 | | } else { |
1215 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1216 | 0 | let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); |
1217 | 0 | shallow_clone_vec(data, shared, buf, ptr, len) |
1218 | | } |
1219 | 0 | } |
1220 | | |
1221 | 0 | unsafe fn promotable_to_vec( |
1222 | 0 | data: &AtomicPtr<()>, |
1223 | 0 | ptr: *const u8, |
1224 | 0 | len: usize, |
1225 | 0 | f: fn(*mut ()) -> *mut u8, |
1226 | 0 | ) -> Vec<u8> { |
1227 | 0 | let shared = data.load(Ordering::Acquire); |
1228 | 0 | let kind = shared as usize & KIND_MASK; |
1229 | 0 |
1230 | 0 | if kind == KIND_ARC { |
1231 | 0 | shared_to_vec_impl(shared.cast(), ptr, len) |
1232 | | } else { |
1233 | | // If Bytes holds a Vec, then the offset must be 0. |
1234 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1235 | | |
1236 | 0 | let buf = f(shared); |
1237 | 0 |
1238 | 0 | let cap = offset_from(ptr, buf) + len; |
1239 | 0 |
1240 | 0 | // Copy back buffer |
1241 | 0 | ptr::copy(ptr, buf, len); |
1242 | 0 |
1243 | 0 | Vec::from_raw_parts(buf, len, cap) |
1244 | | } |
1245 | 0 | } |
1246 | | |
1247 | 0 | unsafe fn promotable_to_mut( |
1248 | 0 | data: &AtomicPtr<()>, |
1249 | 0 | ptr: *const u8, |
1250 | 0 | len: usize, |
1251 | 0 | f: fn(*mut ()) -> *mut u8, |
1252 | 0 | ) -> BytesMut { |
1253 | 0 | let shared = data.load(Ordering::Acquire); |
1254 | 0 | let kind = shared as usize & KIND_MASK; |
1255 | 0 |
1256 | 0 | if kind == KIND_ARC { |
1257 | 0 | shared_to_mut_impl(shared.cast(), ptr, len) |
1258 | | } else { |
1259 | | // KIND_VEC is a view of an underlying buffer at a certain offset. |
1260 | | // The ptr + len always represents the end of that buffer. |
1261 | | // Before truncating it, it is first promoted to KIND_ARC. |
1262 | | // Thus, we can safely reconstruct a Vec from it without leaking memory. |
1263 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1264 | | |
1265 | 0 | let buf = f(shared); |
1266 | 0 | let off = offset_from(ptr, buf); |
1267 | 0 | let cap = off + len; |
1268 | 0 | let v = Vec::from_raw_parts(buf, cap, cap); |
1269 | 0 |
1270 | 0 | let mut b = BytesMut::from_vec(v); |
1271 | 0 | b.advance_unchecked(off); |
1272 | 0 | b |
1273 | | } |
1274 | 0 | } |
1275 | | |
1276 | 0 | unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1277 | 0 | promotable_to_vec(data, ptr, len, |shared| { |
1278 | 0 | ptr_map(shared.cast(), |addr| addr & !KIND_MASK) |
1279 | 0 | }) |
1280 | 0 | } |
1281 | | |
1282 | 0 | unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1283 | 0 | promotable_to_mut(data, ptr, len, |shared| { |
1284 | 0 | ptr_map(shared.cast(), |addr| addr & !KIND_MASK) |
1285 | 0 | }) |
1286 | 0 | } |
1287 | | |
1288 | 0 | unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { |
1289 | 0 | data.with_mut(|shared| { |
1290 | 0 | let shared = *shared; |
1291 | 0 | let kind = shared as usize & KIND_MASK; |
1292 | 0 |
1293 | 0 | if kind == KIND_ARC { |
1294 | 0 | release_shared(shared.cast()); |
1295 | 0 | } else { |
1296 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1297 | 0 | let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); |
1298 | 0 | free_boxed_slice(buf, ptr, len); |
1299 | 0 | } |
1300 | 0 | }); |
1301 | 0 | } |
1302 | | |
1303 | 0 | unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1304 | 0 | let shared = data.load(Ordering::Acquire); |
1305 | 0 | let kind = shared as usize & KIND_MASK; |
1306 | 0 |
1307 | 0 | if kind == KIND_ARC { |
1308 | 0 | shallow_clone_arc(shared as _, ptr, len) |
1309 | | } else { |
1310 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1311 | 0 | shallow_clone_vec(data, shared, shared.cast(), ptr, len) |
1312 | | } |
1313 | 0 | } |
1314 | | |
1315 | 0 | unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1316 | 0 | promotable_to_vec(data, ptr, len, |shared| shared.cast()) |
1317 | 0 | } |
1318 | | |
1319 | 0 | unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1320 | 0 | promotable_to_mut(data, ptr, len, |shared| shared.cast()) |
1321 | 0 | } |
1322 | | |
1323 | 0 | unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { |
1324 | 0 | data.with_mut(|shared| { |
1325 | 0 | let shared = *shared; |
1326 | 0 | let kind = shared as usize & KIND_MASK; |
1327 | 0 |
1328 | 0 | if kind == KIND_ARC { |
1329 | 0 | release_shared(shared.cast()); |
1330 | 0 | } else { |
1331 | 0 | debug_assert_eq!(kind, KIND_VEC); |
1332 | | |
1333 | 0 | free_boxed_slice(shared.cast(), ptr, len); |
1334 | | } |
1335 | 0 | }); |
1336 | 0 | } |
1337 | | |
1338 | 0 | unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool { |
1339 | 0 | let shared = data.load(Ordering::Acquire); |
1340 | 0 | let kind = shared as usize & KIND_MASK; |
1341 | 0 |
1342 | 0 | if kind == KIND_ARC { |
1343 | 0 | let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed); |
1344 | 0 | ref_cnt == 1 |
1345 | | } else { |
1346 | 0 | true |
1347 | | } |
1348 | 0 | } |
1349 | | |
1350 | 0 | unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { |
1351 | 0 | let cap = offset_from(offset, buf) + len; |
1352 | 0 | dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) |
1353 | 0 | } |
1354 | | |
1355 | | // ===== impl SharedVtable ===== |
1356 | | |
1357 | | struct Shared { |
1358 | | // Holds arguments to dealloc upon Drop, but otherwise doesn't use them |
1359 | | buf: *mut u8, |
1360 | | cap: usize, |
1361 | | ref_cnt: AtomicUsize, |
1362 | | } |
1363 | | |
1364 | | impl Drop for Shared { |
1365 | 0 | fn drop(&mut self) { |
1366 | 0 | unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } |
1367 | 0 | } |
1368 | | } |
1369 | | |
1370 | | // Assert that the alignment of `Shared` is divisible by 2. |
1371 | | // This is a necessary invariant, since we depend on the allocated `Shared` |
1372 | | // object's pointer to implicitly carry the `KIND_ARC` flag. |
1373 | | // This flag is set when the LSB is 0. |
1374 | | const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. |
1375 | | |
1376 | | static SHARED_VTABLE: Vtable = Vtable { |
1377 | | clone: shared_clone, |
1378 | | to_vec: shared_to_vec, |
1379 | | to_mut: shared_to_mut, |
1380 | | is_unique: shared_is_unique, |
1381 | | drop: shared_drop, |
1382 | | }; |
1383 | | |
1384 | | const KIND_ARC: usize = 0b0; |
1385 | | const KIND_VEC: usize = 0b1; |
1386 | | const KIND_MASK: usize = 0b1; |
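
The three constants above encode the promotable scheme: because `Box<Shared>` is at least 2-byte aligned, the low bit of the `data` pointer is free to distinguish the two representations. A standalone sketch of that tagging arithmetic (editorial illustration using plain integers rather than real pointers):

```rust
const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;

// Tag a 2-aligned address with a kind bit.
fn tag(addr: usize, kind: usize) -> usize {
    debug_assert_eq!(addr & KIND_MASK, 0, "address must be 2-aligned");
    addr | kind
}

// Recover (address, kind) from a tagged value.
fn untag(tagged: usize) -> (usize, usize) {
    (tagged & !KIND_MASK, tagged & KIND_MASK)
}

fn main() {
    let addr = 0x1000; // stand-in for a Box<Shared> / Vec buffer address
    assert_eq!(untag(tag(addr, KIND_VEC)), (addr, KIND_VEC));
    assert_eq!(untag(tag(addr, KIND_ARC)), (addr, KIND_ARC));
}
```
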
1387 | | |
1388 | 0 | unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { |
1389 | 0 | let shared = data.load(Ordering::Relaxed); |
1390 | 0 | shallow_clone_arc(shared as _, ptr, len) |
1391 | 0 | } |
1392 | | |
1393 | 0 | unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> { |
1394 | 0 | // Check that the ref_cnt is 1 (unique). |
1395 | 0 | // |
1396 | 0 | // If it is unique, then it is set to 0 with AcqRel fence for the same |
1397 | 0 | // reason in release_shared. |
1398 | 0 | // |
1399 | 0 | // Otherwise, we take the other branch and call release_shared. |
1400 | 0 | if (*shared) |
1401 | 0 | .ref_cnt |
1402 | 0 | .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) |
1403 | 0 | .is_ok() |
1404 | | { |
1405 | | // Deallocate the `Shared` instance without running its destructor. |
1406 | 0 | let shared = *Box::from_raw(shared); |
1407 | 0 | let shared = ManuallyDrop::new(shared); |
1408 | 0 | let buf = shared.buf; |
1409 | 0 | let cap = shared.cap; |
1410 | 0 |
1411 | 0 | // Copy back buffer |
1412 | 0 | ptr::copy(ptr, buf, len); |
1413 | 0 |
1414 | 0 | Vec::from_raw_parts(buf, len, cap) |
1415 | | } else { |
1416 | 0 | let v = slice::from_raw_parts(ptr, len).to_vec(); |
1417 | 0 | release_shared(shared); |
1418 | 0 | v |
1419 | | } |
1420 | 0 | } |
1421 | | |
1422 | 0 | unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { |
1423 | 0 | shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) |
1424 | 0 | } |
1425 | | |
1426 | 0 | unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut { |
1427 | 0 | // The goal is to check if the current handle is the only handle |
1428 | 0 | // that currently has access to the buffer. This is done by |
1429 | 0 | // checking if the `ref_cnt` is currently 1. |
1430 | 0 | // |
1431 | 0 | // The `Acquire` ordering synchronizes with the `Release` as |
1432 | 0 | // part of the `fetch_sub` in `release_shared`. The `fetch_sub` |
1433 | 0 | // operation guarantees that any mutations done in other threads |
1434 | 0 | // are ordered before the `ref_cnt` is decremented. As such, |
1435 | 0 | // this `Acquire` will guarantee that those mutations are |
1436 | 0 | // visible to the current thread. |
1437 | 0 | // |
1438 | 0 | // Otherwise, we take the other branch, copy the data and call `release_shared`. |
1439 | 0 | if (*shared).ref_cnt.load(Ordering::Acquire) == 1 { |
1440 | | // Deallocate the `Shared` instance without running its destructor. |
1441 | 0 | let shared = *Box::from_raw(shared); |
1442 | 0 | let shared = ManuallyDrop::new(shared); |
1443 | 0 | let buf = shared.buf; |
1444 | 0 | let cap = shared.cap; |
1445 | 0 |
1446 | 0 | // Rebuild Vec |
1447 | 0 | let off = offset_from(ptr, buf); |
1448 | 0 | let v = Vec::from_raw_parts(buf, len + off, cap); |
1449 | 0 |
1450 | 0 | let mut b = BytesMut::from_vec(v); |
1451 | 0 | b.advance_unchecked(off); |
1452 | 0 | b |
1453 | | } else { |
1454 | | // Copy the data from Shared in a new Vec, then release it |
1455 | 0 | let v = slice::from_raw_parts(ptr, len).to_vec(); |
1456 | 0 | release_shared(shared); |
1457 | 0 | BytesMut::from_vec(v) |
1458 | | } |
1459 | 0 | } |
1460 | | |
1461 | 0 | unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { |
1462 | 0 | shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len) |
1463 | 0 | } |
1464 | | |
1465 | 0 | pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool { |
1466 | 0 | let shared = data.load(Ordering::Acquire); |
1467 | 0 | let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed); |
1468 | 0 | ref_cnt == 1 |
1469 | 0 | } |
1470 | | |
1471 | 0 | unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { |
1472 | 0 | data.with_mut(|shared| { |
1473 | 0 | release_shared(shared.cast()); |
1474 | 0 | }); |
1475 | 0 | } |
1476 | | |
1477 | 0 | unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { |
1478 | 0 | let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); |
1479 | 0 |
1480 | 0 | if old_size > usize::MAX >> 1 { |
1481 | 0 | crate::abort(); |
1482 | 0 | } |
1483 | 0 |
1484 | 0 | Bytes { |
1485 | 0 | ptr, |
1486 | 0 | len, |
1487 | 0 | data: AtomicPtr::new(shared as _), |
1488 | 0 | vtable: &SHARED_VTABLE, |
1489 | 0 | } |
1490 | 0 | } |
1491 | | |
1492 | | #[cold] |
1493 | 0 | unsafe fn shallow_clone_vec( |
1494 | 0 | atom: &AtomicPtr<()>, |
1495 | 0 | ptr: *const (), |
1496 | 0 | buf: *mut u8, |
1497 | 0 | offset: *const u8, |
1498 | 0 | len: usize, |
1499 | 0 | ) -> Bytes { |
1500 | 0 | // If the buffer is still tracked in a `Vec<u8>`, it is time to |
1501 | 0 | // promote the vec to an `Arc`. This could potentially be called |
1502 | 0 | // concurrently, so some care must be taken. |
1503 | 0 |
1504 | 0 | // First, allocate a new `Shared` instance containing the |
1505 | 0 | // `Vec` fields. It's important to note that `ptr`, `len`, |
1506 | 0 | // and `cap` cannot be mutated without having `&mut self`. |
1507 | 0 | // This means that these fields will not be concurrently |
1508 | 0 | // updated and since the buffer hasn't been promoted to an |
1509 | 0 | // `Arc`, those three fields still are the components of the |
1510 | 0 | // vector. |
1511 | 0 | let shared = Box::new(Shared { |
1512 | 0 | buf, |
1513 | 0 | cap: offset_from(offset, buf) + len, |
1514 | 0 | // Initialize refcount to 2. One for this reference, and one |
1515 | 0 | // for the new clone that will be returned from |
1516 | 0 | // `shallow_clone`. |
1517 | 0 | ref_cnt: AtomicUsize::new(2), |
1518 | 0 | }); |
1519 | 0 |
1520 | 0 | let shared = Box::into_raw(shared); |
1521 | 0 |
1522 | 0 | // The pointer should be aligned, so this assert should |
1523 | 0 | // always succeed. |
1524 | 0 | debug_assert!( |
1525 | 0 | 0 == (shared as usize & KIND_MASK), |
1526 | | "internal: Box<Shared> should have an aligned pointer", |
1527 | | ); |
1528 | | |
1529 | | // Try compare & swapping the pointer into the `arc` field. |
1530 | | // `Release` is used to synchronize with other threads that |
1531 | | // will load the `arc` field. |
1532 | | // |
1533 | | // If the `compare_exchange` fails, then the thread lost the |
1534 | | // race to promote the buffer to shared. The `Acquire` |
1535 | | // ordering will synchronize with the `compare_exchange` |
1536 | | // that happened in the other thread and the `Shared` |
1537 | | // pointed to by `actual` will be visible. |
1538 | 0 | match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) { |
1539 | 0 | Ok(actual) => { |
1540 | 0 | debug_assert!(actual as usize == ptr as usize); |
1541 | | // The upgrade was successful, the new handle can be |
1542 | | // returned. |
1543 | 0 | Bytes { |
1544 | 0 | ptr: offset, |
1545 | 0 | len, |
1546 | 0 | data: AtomicPtr::new(shared as _), |
1547 | 0 | vtable: &SHARED_VTABLE, |
1548 | 0 | } |
1549 | | } |
1550 | 0 | Err(actual) => { |
1551 | 0 | // The upgrade failed, a concurrent clone happened. Release |
1552 | 0 | // the allocation that was made in this thread, it will not |
1553 | 0 | // be needed. |
1554 | 0 | let shared = Box::from_raw(shared); |
1555 | 0 | mem::forget(*shared); |
1556 | 0 |
1557 | 0 | // Buffer already promoted to shared storage, so increment ref |
1558 | 0 | // count. |
1559 | 0 | shallow_clone_arc(actual as _, offset, len) |
1560 | | } |
1561 | | } |
1562 | 0 | } |
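
The promotion race handled by `shallow_clone_vec` follows a common compare-and-swap pattern; a simplified standalone sketch (editorial, using a null pointer as the "not yet promoted" marker instead of the stashed Vec pointer):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

struct SharedState {
    ref_cnt: AtomicUsize,
}

// Promote `slot` to shared state, racing against other clones.
fn promote(slot: &AtomicPtr<SharedState>) -> *mut SharedState {
    // Refcount starts at 2: one for the existing handle, one for the clone.
    let candidate = Box::into_raw(Box::new(SharedState {
        ref_cnt: AtomicUsize::new(2),
    }));

    match slot.compare_exchange(
        ptr::null_mut(), // expected: nobody has promoted yet
        candidate,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        // We won: our allocation is now the shared state.
        Ok(_) => candidate,
        // We lost: free our candidate and join the winner's state.
        Err(existing) => {
            unsafe { drop(Box::from_raw(candidate)) };
            unsafe { (*existing).ref_cnt.fetch_add(1, Ordering::Relaxed) };
            existing
        }
    }
}

fn main() {
    let slot = AtomicPtr::new(ptr::null_mut());
    let first = promote(&slot);
    let second = promote(&slot);
    assert_eq!(first, second); // everyone ends up on the same shared state
    unsafe { drop(Box::from_raw(first)) }; // simplified cleanup for the sketch
}
```
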
1563 | | |
1564 | 0 | unsafe fn release_shared(ptr: *mut Shared) { |
1565 | 0 | // `Shared` storage... follow the drop steps from Arc. |
1566 | 0 | if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { |
1567 | 0 | return; |
1568 | 0 | } |
1569 | 0 |
1570 | 0 | // This fence is needed to prevent reordering of use of the data and |
1571 | 0 | // deletion of the data. Because it is marked `Release`, the decreasing |
1572 | 0 | // of the reference count synchronizes with this `Acquire` fence. This |
1573 | 0 | // means that use of the data happens before decreasing the reference |
1574 | 0 | // count, which happens before this fence, which happens before the |
1575 | 0 | // deletion of the data. |
1576 | 0 | // |
1577 | 0 | // As explained in the [Boost documentation][1], |
1578 | 0 | // |
1579 | 0 | // > It is important to enforce any possible access to the object in one |
1580 | 0 | // > thread (through an existing reference) to *happen before* deleting |
1581 | 0 | // > the object in a different thread. This is achieved by a "release" |
1582 | 0 | // > operation after dropping a reference (any access to the object |
1583 | 0 | // > through this reference must obviously happened before), and an |
1584 | 0 | // > "acquire" operation before deleting the object. |
1585 | 0 | // |
1586 | 0 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
1587 | 0 | // |
1588 | 0 | // Thread sanitizer does not support atomic fences. Use an atomic load |
1589 | 0 | // instead. |
1590 | 0 | (*ptr).ref_cnt.load(Ordering::Acquire); |
1591 | 0 |
1592 | 0 | // Drop the data |
1593 | 0 | drop(Box::from_raw(ptr)); |
1594 | 0 | } |
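
For reference, the Release-decrement / Acquire-before-free shape that `release_shared` implements, as a generic standalone sketch (editorial; the crate uses an `Acquire` load rather than a fence so ThreadSanitizer can reason about it):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct RefCounted<T> {
    ref_cnt: AtomicUsize,
    value: T,
}

/// Drop one reference; free the allocation when the last one goes away.
///
/// Safety: `ptr` must come from `Box::into_raw` and each handle must call
/// this exactly once.
unsafe fn release<T>(ptr: *mut RefCounted<T>) {
    // Release: all prior uses of `value` happen-before this decrement.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }
    // Acquire: synchronize with the other handles' decrements before freeing.
    fence(Ordering::Acquire);
    drop(Box::from_raw(ptr));
}

fn main() {
    let p = Box::into_raw(Box::new(RefCounted {
        ref_cnt: AtomicUsize::new(2),
        value: vec![1u8, 2, 3],
    }));
    unsafe {
        assert_eq!((*p).value.len(), 3);
        release(p); // count 2 -> 1, nothing freed
        release(p); // count 1 -> 0, the box is freed here
    }
}
```
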
1595 | | |
1596 | | // Ideally we would always use this version of `ptr_map` since it is strict |
1597 | | // provenance compatible, but it results in worse codegen. We will however still |
1598 | | // use it on miri because it gives better diagnostics for people who test bytes |
1599 | | // code with miri. |
1600 | | // |
1601 | | // See https://github.com/tokio-rs/bytes/pull/545 for more info. |
1602 | | #[cfg(miri)] |
1603 | | fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 |
1604 | | where |
1605 | | F: FnOnce(usize) -> usize, |
1606 | | { |
1607 | | let old_addr = ptr as usize; |
1608 | | let new_addr = f(old_addr); |
1609 | | let diff = new_addr.wrapping_sub(old_addr); |
1610 | | ptr.wrapping_add(diff) |
1611 | | } |
1612 | | |
1613 | | #[cfg(not(miri))] |
1614 | 0 | fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 |
1615 | 0 | where |
1616 | 0 | F: FnOnce(usize) -> usize, |
1617 | 0 | { |
1618 | 0 | let old_addr = ptr as usize; |
1619 | 0 | let new_addr = f(old_addr); |
1620 | 0 | new_addr as *mut u8 |
1621 | 0 | } |
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_drop::{closure#0}::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_to_mut::{closure#0}::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_to_vec::{closure#0}::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<bytes::bytes::promotable_even_clone::{closure#0}>
Unexecuted instantiation: bytes::bytes::ptr_map::<<bytes::bytes::Bytes as core::convert::From<alloc::boxed::Box<[u8]>>>::from::{closure#0}>
1622 | | |
1623 | 0 | fn without_provenance(ptr: usize) -> *const u8 { |
1624 | 0 | core::ptr::null::<u8>().wrapping_add(ptr) |
1625 | 0 | } |
1626 | | |
1627 | | // compile-fails |
1628 | | |
1629 | | /// ```compile_fail |
1630 | | /// use bytes::Bytes; |
1631 | | /// #[deny(unused_must_use)] |
1632 | | /// { |
1633 | | /// let mut b1 = Bytes::from("hello world"); |
1634 | | /// b1.split_to(6); |
1635 | | /// } |
1636 | | /// ``` |
1637 | 0 | fn _split_to_must_use() {} |
1638 | | |
1639 | | /// ```compile_fail |
1640 | | /// use bytes::Bytes; |
1641 | | /// #[deny(unused_must_use)] |
1642 | | /// { |
1643 | | /// let mut b1 = Bytes::from("hello world"); |
1644 | | /// b1.split_off(6); |
1645 | | /// } |
1646 | | /// ``` |
1647 | 0 | fn _split_off_must_use() {} |
1648 | | |
1649 | | // fuzz tests |
1650 | | #[cfg(all(test, loom))] |
1651 | | mod fuzz { |
1652 | | use loom::sync::Arc; |
1653 | | use loom::thread; |
1654 | | |
1655 | | use super::Bytes; |
1656 | | #[test] |
1657 | | fn bytes_cloning_vec() { |
1658 | | loom::model(|| { |
1659 | | let a = Bytes::from(b"abcdefgh".to_vec()); |
1660 | | let addr = a.as_ptr() as usize; |
1661 | | |
1662 | | // test the Bytes::clone is Sync by putting it in an Arc |
1663 | | let a1 = Arc::new(a); |
1664 | | let a2 = a1.clone(); |
1665 | | |
1666 | | let t1 = thread::spawn(move || { |
1667 | | let b: Bytes = (*a1).clone(); |
1668 | | assert_eq!(b.as_ptr() as usize, addr); |
1669 | | }); |
1670 | | |
1671 | | let t2 = thread::spawn(move || { |
1672 | | let b: Bytes = (*a2).clone(); |
1673 | | assert_eq!(b.as_ptr() as usize, addr); |
1674 | | }); |
1675 | | |
1676 | | t1.join().unwrap(); |
1677 | | t2.join().unwrap(); |
1678 | | }); |
1679 | | } |
1680 | | } |