bytes/bytes.rs

1use core::mem::{self, ManuallyDrop};
2use core::ops::{Deref, RangeBounds};
3use core::ptr::NonNull;
4use core::{cmp, fmt, hash, ptr, slice};
5
6use alloc::{
7    alloc::{dealloc, Layout},
8    borrow::Borrow,
9    boxed::Box,
10    string::String,
11    vec::Vec,
12};
13
14use crate::buf::IntoIter;
15#[allow(unused)]
16use crate::loom::sync::atomic::AtomicMut;
17use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
18use crate::{Buf, BytesMut};
19
20/// A cheaply cloneable and sliceable chunk of contiguous memory.
21///
22/// `Bytes` is an efficient container for storing and operating on contiguous
23/// slices of memory. It is intended for use primarily in networking code, but
24/// could have applications elsewhere as well.
25///
26/// `Bytes` values facilitate zero-copy network programming by allowing multiple
27/// `Bytes` objects to point to the same underlying memory.
28///
29/// `Bytes` does not have a single implementation. It is an interface, whose
30/// exact behavior is implemented through dynamic dispatch in several underlying
31/// implementations of `Bytes`.
32///
33/// All `Bytes` implementations must fulfill the following requirements:
/// - They are cheaply cloneable and thereby shareable between an unlimited number
///   of components, for example by modifying a reference count.
36/// - Instances can be sliced to refer to a subset of the original buffer.
37///
38/// ```
39/// use bytes::Bytes;
40///
41/// let mut mem = Bytes::from("Hello world");
42/// let a = mem.slice(0..5);
43///
44/// assert_eq!(a, "Hello");
45///
46/// let b = mem.split_to(6);
47///
48/// assert_eq!(mem, "world");
49/// assert_eq!(b, "Hello ");
50/// ```
51///
52/// # Memory layout
53///
54/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
55/// to track information about which segment of the underlying memory the
56/// `Bytes` handle has access to.
57///
58/// `Bytes` keeps both a pointer to the shared state containing the full memory
59/// slice and a pointer to the start of the region visible by the handle.
60/// `Bytes` also tracks the length of its view into the memory.
61///
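/// As a quick check of that claim (a doctest sketch of the current size, not a
/// stable layout guarantee):
///
/// ```
/// use bytes::Bytes;
///
/// assert_eq!(core::mem::size_of::<Bytes>(), 4 * core::mem::size_of::<usize>());
/// ```
///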
62/// # Sharing
63///
64/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
65/// how sharing/cloning is implemented in detail.
66/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
67/// cloning the backing storage in order to share it behind multiple `Bytes`
68/// instances.
69///
70/// For `Bytes` implementations which refer to constant memory (e.g. created
71/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
72///
73/// For `Bytes` implementations which point to a reference counted shared storage
74/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
75/// reference count.
76///
77/// Due to this mechanism, multiple `Bytes` instances may point to the same
78/// shared memory region.
79/// Each `Bytes` instance can point to different sections within that
80/// memory region, and `Bytes` instances may or may not have overlapping views
81/// into the memory.
82///
83/// The following diagram visualizes a scenario where 2 `Bytes` instances make
84/// use of an `Arc`-based backing storage, and provide access to different views:
85///
86/// ```text
87///
88///    Arc ptrs                   ┌─────────┐
89///    ________________________ / │ Bytes 2 │
90///   /                           └─────────┘
91///  /          ┌───────────┐     |         |
92/// |_________/ │  Bytes 1  │     |         |
93/// |           └───────────┘     |         |
94/// |           |           | ___/ data     | tail
95/// |      data |      tail |/              |
96/// v           v           v               v
97/// ┌─────┬─────┬───────────┬───────────────┬─────┐
98/// │ Arc │     │           │               │     │
99/// └─────┴─────┴───────────┴───────────────┴─────┘
100/// ```
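///
/// A minimal sketch of that sharing in code, using only the public API shown
/// in this module's examples:
///
/// ```
/// use bytes::Bytes;
///
/// let full = Bytes::from(vec![1, 2, 3, 4, 5]);
/// let tail = full.slice(2..); // new handle into the same storage
/// let copy = full.clone();    // bumps a reference count, no copy of the data
///
/// // Both handles read from the same underlying region.
/// assert_eq!(full[2..].as_ptr(), tail.as_ptr());
/// assert_eq!(full, copy);
/// ```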
101pub struct Bytes {
102    ptr: *const u8,
103    len: usize,
104    // inlined "trait object"
105    data: AtomicPtr<()>,
106    vtable: &'static Vtable,
107}
108
109pub(crate) struct Vtable {
110    /// fn(data, ptr, len)
111    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
112    /// fn(data, ptr, len)
113    ///
114    /// `into_*` consumes the `Bytes`, returning the respective value.
115    pub into_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
116    pub into_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut,
117    /// fn(data)
118    pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool,
119    /// fn(data, ptr, len)
120    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
121}
122
123impl Bytes {
124    /// Creates a new empty `Bytes`.
125    ///
126    /// This will not allocate and the returned `Bytes` handle will be empty.
127    ///
128    /// # Examples
129    ///
130    /// ```
131    /// use bytes::Bytes;
132    ///
133    /// let b = Bytes::new();
134    /// assert_eq!(&b[..], b"");
135    /// ```
136    #[inline]
137    #[cfg(not(all(loom, test)))]
138    pub const fn new() -> Self {
139        // Make it a named const to work around
140        // "unsizing casts are not allowed in const fn"
141        const EMPTY: &[u8] = &[];
142        Bytes::from_static(EMPTY)
143    }
144
145    /// Creates a new empty `Bytes`.
146    #[cfg(all(loom, test))]
147    pub fn new() -> Self {
148        const EMPTY: &[u8] = &[];
149        Bytes::from_static(EMPTY)
150    }
151
152    /// Creates a new `Bytes` from a static slice.
153    ///
154    /// The returned `Bytes` will point directly to the static slice. There is
155    /// no allocating or copying.
156    ///
157    /// # Examples
158    ///
159    /// ```
160    /// use bytes::Bytes;
161    ///
162    /// let b = Bytes::from_static(b"hello");
163    /// assert_eq!(&b[..], b"hello");
164    /// ```
165    #[inline]
166    #[cfg(not(all(loom, test)))]
167    pub const fn from_static(bytes: &'static [u8]) -> Self {
168        Bytes {
169            ptr: bytes.as_ptr(),
170            len: bytes.len(),
171            data: AtomicPtr::new(ptr::null_mut()),
172            vtable: &STATIC_VTABLE,
173        }
174    }
175
176    /// Creates a new `Bytes` from a static slice.
177    #[cfg(all(loom, test))]
178    pub fn from_static(bytes: &'static [u8]) -> Self {
179        Bytes {
180            ptr: bytes.as_ptr(),
181            len: bytes.len(),
182            data: AtomicPtr::new(ptr::null_mut()),
183            vtable: &STATIC_VTABLE,
184        }
185    }
186
187    /// Creates a new `Bytes` with length zero and the given pointer as the address.
188    fn new_empty_with_ptr(ptr: *const u8) -> Self {
189        debug_assert!(!ptr.is_null());
190
191        // Detach this pointer's provenance from whichever allocation it came from, and reattach it
192        // to the provenance of the fake ZST [u8;0] at the same address.
193        let ptr = without_provenance(ptr as usize);
194
195        Bytes {
196            ptr,
197            len: 0,
198            data: AtomicPtr::new(ptr::null_mut()),
199            vtable: &STATIC_VTABLE,
200        }
201    }
202
203    /// Create [Bytes] with a buffer whose lifetime is controlled
204    /// via an explicit owner.
205    ///
206    /// A common use case is to zero-copy construct from mapped memory.
207    ///
208    /// ```
209    /// # struct File;
210    /// #
211    /// # impl File {
212    /// #     pub fn open(_: &str) -> Result<Self, ()> {
213    /// #         Ok(Self)
214    /// #     }
215    /// # }
216    /// #
217    /// # mod memmap2 {
218    /// #     pub struct Mmap;
219    /// #
220    /// #     impl Mmap {
221    /// #         pub unsafe fn map(_file: &super::File) -> Result<Self, ()> {
222    /// #             Ok(Self)
223    /// #         }
224    /// #     }
225    /// #
226    /// #     impl AsRef<[u8]> for Mmap {
227    /// #         fn as_ref(&self) -> &[u8] {
228    /// #             b"buf"
229    /// #         }
230    /// #     }
231    /// # }
232    /// use bytes::Bytes;
233    /// use memmap2::Mmap;
234    ///
235    /// # fn main() -> Result<(), ()> {
236    /// let file = File::open("upload_bundle.tar.gz")?;
237    /// let mmap = unsafe { Mmap::map(&file) }?;
238    /// let b = Bytes::from_owner(mmap);
239    /// # Ok(())
240    /// # }
241    /// ```
242    ///
243    /// The `owner` will be transferred to the constructed [Bytes] object, which
244    /// will ensure it is dropped once all remaining clones of the constructed
245    /// object are dropped. The owner will then be responsible for dropping the
246    /// specified region of memory as part of its [Drop] implementation.
247    ///
248    /// Note that converting [Bytes] constructed from an owner into a [BytesMut]
249    /// will always create a deep copy of the buffer into newly allocated memory.
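    ///
    /// A smaller sketch using a plain `Vec<u8>` as the owner (any
    /// `AsRef<[u8]> + Send + 'static` type works the same way):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let owner: Vec<u8> = b"hello".to_vec();
    /// let b = Bytes::from_owner(owner);
    /// assert_eq!(&b[..], b"hello");
    /// // Owner-backed buffers always copy when converted to `BytesMut`.
    /// assert!(b.try_into_mut().is_err());
    /// ```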
250    pub fn from_owner<T>(owner: T) -> Self
251    where
252        T: AsRef<[u8]> + Send + 'static,
253    {
254        // Safety & Miri:
255        // The ownership of `owner` is first transferred to the `Owned` wrapper and `Bytes` object.
256        // This ensures that the owner is pinned in memory, allowing us to call `.as_ref()` safely
257        // since the lifetime of the owner is controlled by the lifetime of the new `Bytes` object,
258        // and the lifetime of the resulting borrowed `&[u8]` matches that of the owner.
259        // Note that this remains safe so long as we only call `.as_ref()` once.
260        //
261        // There are some additional special considerations here:
262        //   * We rely on Bytes's Drop impl to clean up memory should `.as_ref()` panic.
263        //   * Setting the `ptr` and `len` on the bytes object last (after moving the owner to
264        //     Bytes) allows Miri checks to pass since it avoids obtaining the `&[u8]` slice
265        //     from a stack-owned Box.
266        // More details on this: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813375863
267        //                  and: https://github.com/tokio-rs/bytes/pull/742/#discussion_r1813316032
268
269        let owned = Box::into_raw(Box::new(Owned {
270            ref_cnt: AtomicUsize::new(1),
271            owner,
272        }));
273
274        let mut ret = Bytes {
275            ptr: NonNull::dangling().as_ptr(),
276            len: 0,
277            data: AtomicPtr::new(owned.cast()),
278            vtable: &Owned::<T>::VTABLE,
279        };
280
281        let buf = unsafe { &*owned }.owner.as_ref();
282        ret.ptr = buf.as_ptr();
283        ret.len = buf.len();
284
285        ret
286    }
287
288    /// Returns the number of bytes contained in this `Bytes`.
289    ///
290    /// # Examples
291    ///
292    /// ```
293    /// use bytes::Bytes;
294    ///
295    /// let b = Bytes::from(&b"hello"[..]);
296    /// assert_eq!(b.len(), 5);
297    /// ```
298    #[inline]
299    pub const fn len(&self) -> usize {
300        self.len
301    }
302
303    /// Returns true if the `Bytes` has a length of 0.
304    ///
305    /// # Examples
306    ///
307    /// ```
308    /// use bytes::Bytes;
309    ///
310    /// let b = Bytes::new();
311    /// assert!(b.is_empty());
312    /// ```
313    #[inline]
314    pub const fn is_empty(&self) -> bool {
315        self.len == 0
316    }
317
318    /// Returns true if this is the only reference to the data and
319    /// `Into<BytesMut>` would avoid cloning the underlying buffer.
320    ///
321    /// Always returns false if the data is backed by a [static slice](Bytes::from_static),
322    /// or an [owner](Bytes::from_owner).
323    ///
324    /// The result of this method may be invalidated immediately if another
325    /// thread clones this value while this is being called. Ensure you have
326    /// unique access to this value (`&mut Bytes`) first if you need to be
    /// certain the result is valid (i.e. for safety reasons).
    ///
    /// # Examples
329    ///
330    /// ```
331    /// use bytes::Bytes;
332    ///
333    /// let a = Bytes::from(vec![1, 2, 3]);
334    /// assert!(a.is_unique());
335    /// let b = a.clone();
336    /// assert!(!a.is_unique());
337    /// ```
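    ///
    /// Static- and owner-backed `Bytes` always report `false` (a sketch):
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let s = Bytes::from_static(b"static");
    /// assert!(!s.is_unique());
    /// ```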
338    pub fn is_unique(&self) -> bool {
339        unsafe { (self.vtable.is_unique)(&self.data) }
340    }
341
    /// Creates a `Bytes` instance from a slice by copying it.
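    ///
    /// A short sketch:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```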
343    pub fn copy_from_slice(data: &[u8]) -> Self {
344        data.to_vec().into()
345    }
346
347    /// Returns a slice of self for the provided range.
348    ///
349    /// This will increment the reference count for the underlying memory and
350    /// return a new `Bytes` handle set to the slice.
351    ///
352    /// This operation is `O(1)`.
353    ///
354    /// # Examples
355    ///
356    /// ```
357    /// use bytes::Bytes;
358    ///
359    /// let a = Bytes::from(&b"hello world"[..]);
360    /// let b = a.slice(2..5);
361    ///
362    /// assert_eq!(&b[..], b"llo");
363    /// ```
364    ///
365    /// # Panics
366    ///
367    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
368    /// will panic.
369    pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
370        use core::ops::Bound;
371
372        let len = self.len();
373
374        let begin = match range.start_bound() {
375            Bound::Included(&n) => n,
376            Bound::Excluded(&n) => n.checked_add(1).expect("out of range"),
377            Bound::Unbounded => 0,
378        };
379
380        let end = match range.end_bound() {
381            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
382            Bound::Excluded(&n) => n,
383            Bound::Unbounded => len,
384        };
385
386        assert!(
387            begin <= end,
388            "range start must not be greater than end: {:?} <= {:?}",
389            begin,
390            end,
391        );
392        assert!(
393            end <= len,
394            "range end out of bounds: {:?} <= {:?}",
395            end,
396            len,
397        );
398
399        if end == begin {
400            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(begin));
401        }
402
403        let mut ret = self.clone();
404
405        ret.len = end - begin;
406        ret.ptr = unsafe { ret.ptr.add(begin) };
407
408        ret
409    }
410
411    /// Returns a slice of self that is equivalent to the given `subset`.
412    ///
413    /// When processing a `Bytes` buffer with other tools, one often gets a
414    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
415    /// This function turns that `&[u8]` into another `Bytes`, as if one had
416    /// called `self.slice()` with the offsets that correspond to `subset`.
417    ///
418    /// This operation is `O(1)`.
419    ///
420    /// # Examples
421    ///
422    /// ```
423    /// use bytes::Bytes;
424    ///
425    /// let bytes = Bytes::from(&b"012345678"[..]);
426    /// let as_slice = bytes.as_ref();
427    /// let subset = &as_slice[2..6];
428    /// let subslice = bytes.slice_ref(&subset);
429    /// assert_eq!(&subslice[..], b"2345");
430    /// ```
431    ///
432    /// # Panics
433    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
436    pub fn slice_ref(&self, subset: &[u8]) -> Self {
437        // Empty slice and empty Bytes may have their pointers reset
438        // so explicitly allow empty slice to be a subslice of any slice.
439        if subset.is_empty() {
440            return Bytes::new();
441        }
442
443        let bytes_p = self.as_ptr() as usize;
444        let bytes_len = self.len();
445
446        let sub_p = subset.as_ptr() as usize;
447        let sub_len = subset.len();
448
449        assert!(
450            sub_p >= bytes_p,
451            "subset pointer ({:p}) is smaller than self pointer ({:p})",
452            subset.as_ptr(),
453            self.as_ptr(),
454        );
455        assert!(
456            sub_p + sub_len <= bytes_p + bytes_len,
457            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
458            self.as_ptr(),
459            bytes_len,
460            subset.as_ptr(),
461            sub_len,
462        );
463
464        let sub_offset = sub_p - bytes_p;
465
466        self.slice(sub_offset..(sub_offset + sub_len))
467    }
468
469    /// Splits the bytes into two at the given index.
470    ///
471    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
472    /// contains elements `[at, len)`. It's guaranteed that the memory does not
473    /// move, that is, the address of `self` does not change, and the address of
474    /// the returned slice is `at` bytes after that.
475    ///
476    /// This is an `O(1)` operation that just increases the reference count and
477    /// sets a few indices.
478    ///
479    /// # Examples
480    ///
481    /// ```
482    /// use bytes::Bytes;
483    ///
484    /// let mut a = Bytes::from(&b"hello world"[..]);
485    /// let b = a.split_off(5);
486    ///
487    /// assert_eq!(&a[..], b"hello");
488    /// assert_eq!(&b[..], b" world");
489    /// ```
490    ///
491    /// # Panics
492    ///
493    /// Panics if `at > len`.
494    #[must_use = "consider Bytes::truncate if you don't need the other half"]
495    pub fn split_off(&mut self, at: usize) -> Self {
496        if at == self.len() {
497            return Bytes::new_empty_with_ptr(self.ptr.wrapping_add(at));
498        }
499
500        if at == 0 {
501            return mem::replace(self, Bytes::new_empty_with_ptr(self.ptr));
502        }
503
504        assert!(
505            at <= self.len(),
506            "split_off out of bounds: {:?} <= {:?}",
507            at,
508            self.len(),
509        );
510
511        let mut ret = self.clone();
512
513        self.len = at;
514
515        unsafe { ret.inc_start(at) };
516
517        ret
518    }
519
520    /// Splits the bytes into two at the given index.
521    ///
522    /// Afterwards `self` contains elements `[at, len)`, and the returned
523    /// `Bytes` contains elements `[0, at)`.
524    ///
525    /// This is an `O(1)` operation that just increases the reference count and
526    /// sets a few indices.
527    ///
528    /// # Examples
529    ///
530    /// ```
531    /// use bytes::Bytes;
532    ///
533    /// let mut a = Bytes::from(&b"hello world"[..]);
534    /// let b = a.split_to(5);
535    ///
536    /// assert_eq!(&a[..], b" world");
537    /// assert_eq!(&b[..], b"hello");
538    /// ```
539    ///
540    /// # Panics
541    ///
542    /// Panics if `at > len`.
543    #[must_use = "consider Bytes::advance if you don't need the other half"]
544    pub fn split_to(&mut self, at: usize) -> Self {
545        if at == self.len() {
546            let end_ptr = self.ptr.wrapping_add(at);
547            return mem::replace(self, Bytes::new_empty_with_ptr(end_ptr));
548        }
549
550        if at == 0 {
551            return Bytes::new_empty_with_ptr(self.ptr);
552        }
553
554        assert!(
555            at <= self.len(),
556            "split_to out of bounds: {:?} <= {:?}",
557            at,
558            self.len(),
559        );
560
561        let mut ret = self.clone();
562
563        unsafe { self.inc_start(at) };
564
565        ret.len = at;
566        ret
567    }
568
569    /// Shortens the buffer, keeping the first `len` bytes and dropping the
570    /// rest.
571    ///
572    /// If `len` is greater than the buffer's current length, this has no
573    /// effect.
574    ///
575    /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
576    /// excess bytes to be returned instead of dropped.
577    ///
578    /// # Examples
579    ///
580    /// ```
581    /// use bytes::Bytes;
582    ///
583    /// let mut buf = Bytes::from(&b"hello world"[..]);
584    /// buf.truncate(5);
585    /// assert_eq!(buf, b"hello"[..]);
586    /// ```
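    ///
    /// A sketch of the `split_off` emulation mentioned above, where the excess
    /// bytes are returned instead of dropped:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// let rest = buf.split_off(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// assert_eq!(rest, b" world"[..]);
    /// ```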
587    #[inline]
588    pub fn truncate(&mut self, len: usize) {
589        if len < self.len {
590            // The Vec "promotable" vtables do not store the capacity,
591            // so we cannot truncate while using this repr. We *have* to
592            // promote using `split_off` so the capacity can be stored.
593            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
594                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
595            {
596                drop(self.split_off(len));
597            } else {
598                self.len = len;
599            }
600        }
601    }
602
603    /// Clears the buffer, removing all data.
604    ///
605    /// # Examples
606    ///
607    /// ```
608    /// use bytes::Bytes;
609    ///
610    /// let mut buf = Bytes::from(&b"hello world"[..]);
611    /// buf.clear();
612    /// assert!(buf.is_empty());
613    /// ```
614    #[inline]
615    pub fn clear(&mut self) {
616        self.truncate(0);
617    }
618
619    /// Try to convert self into `BytesMut`.
620    ///
621    /// If `self` is unique for the entire original buffer, this will succeed
622    /// and return a `BytesMut` with the contents of `self` without copying.
623    /// If `self` is not unique for the entire original buffer, this will fail
624    /// and return self.
625    ///
626    /// This will also always fail if the buffer was constructed via either
627    /// [from_owner](Bytes::from_owner) or [from_static](Bytes::from_static).
628    ///
629    /// # Examples
630    ///
631    /// ```
632    /// use bytes::{Bytes, BytesMut};
633    ///
634    /// let bytes = Bytes::from(b"hello".to_vec());
635    /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..])));
636    /// ```
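    ///
    /// A sketch of the failing case, where a second handle still exists:
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(b"hello".to_vec());
    /// let b = a.clone();
    /// // `a` is no longer unique, so the original `Bytes` is handed back.
    /// assert!(a.try_into_mut().is_err());
    /// drop(b);
    /// ```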
637    pub fn try_into_mut(self) -> Result<BytesMut, Bytes> {
638        if self.is_unique() {
639            Ok(self.into())
640        } else {
641            Err(self)
642        }
643    }
644
645    #[inline]
646    pub(crate) unsafe fn with_vtable(
647        ptr: *const u8,
648        len: usize,
649        data: AtomicPtr<()>,
650        vtable: &'static Vtable,
651    ) -> Bytes {
652        Bytes {
653            ptr,
654            len,
655            data,
656            vtable,
657        }
658    }
659
660    // private
661
662    #[inline]
663    fn as_slice(&self) -> &[u8] {
664        unsafe { slice::from_raw_parts(self.ptr, self.len) }
665    }
666
667    #[inline]
668    unsafe fn inc_start(&mut self, by: usize) {
669        // should already be asserted, but debug assert for tests
670        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
671        self.len -= by;
672        self.ptr = self.ptr.add(by);
673    }
674}
675
676// Vtable must enforce this behavior
677unsafe impl Send for Bytes {}
678unsafe impl Sync for Bytes {}
679
680impl Drop for Bytes {
681    #[inline]
682    fn drop(&mut self) {
683        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
684    }
685}
686
687impl Clone for Bytes {
688    #[inline]
689    fn clone(&self) -> Bytes {
690        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
691    }
692}
693
694impl Buf for Bytes {
695    #[inline]
696    fn remaining(&self) -> usize {
697        self.len()
698    }
699
700    #[inline]
701    fn chunk(&self) -> &[u8] {
702        self.as_slice()
703    }
704
705    #[inline]
706    fn advance(&mut self, cnt: usize) {
707        assert!(
708            cnt <= self.len(),
709            "cannot advance past `remaining`: {:?} <= {:?}",
710            cnt,
711            self.len(),
712        );
713
714        unsafe {
715            self.inc_start(cnt);
716        }
717    }
718
719    fn copy_to_bytes(&mut self, len: usize) -> Self {
720        self.split_to(len)
721    }
722}
723
724impl Deref for Bytes {
725    type Target = [u8];
726
727    #[inline]
728    fn deref(&self) -> &[u8] {
729        self.as_slice()
730    }
731}
732
733impl AsRef<[u8]> for Bytes {
734    #[inline]
735    fn as_ref(&self) -> &[u8] {
736        self.as_slice()
737    }
738}
739
740impl hash::Hash for Bytes {
741    fn hash<H>(&self, state: &mut H)
742    where
743        H: hash::Hasher,
744    {
745        self.as_slice().hash(state);
746    }
747}
748
749impl Borrow<[u8]> for Bytes {
750    fn borrow(&self) -> &[u8] {
751        self.as_slice()
752    }
753}
754
755impl IntoIterator for Bytes {
756    type Item = u8;
757    type IntoIter = IntoIter<Bytes>;
758
759    fn into_iter(self) -> Self::IntoIter {
760        IntoIter::new(self)
761    }
762}
763
764impl<'a> IntoIterator for &'a Bytes {
765    type Item = &'a u8;
766    type IntoIter = core::slice::Iter<'a, u8>;
767
768    fn into_iter(self) -> Self::IntoIter {
769        self.as_slice().iter()
770    }
771}
772
773impl FromIterator<u8> for Bytes {
774    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
775        Vec::from_iter(into_iter).into()
776    }
777}
778
779// impl Eq
780
781impl PartialEq for Bytes {
782    fn eq(&self, other: &Bytes) -> bool {
783        self.as_slice() == other.as_slice()
784    }
785}
786
787impl PartialOrd for Bytes {
788    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
789        Some(self.cmp(other))
790    }
791}
792
793impl Ord for Bytes {
794    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
795        self.as_slice().cmp(other.as_slice())
796    }
797}
798
799impl Eq for Bytes {}
800
801impl PartialEq<[u8]> for Bytes {
802    fn eq(&self, other: &[u8]) -> bool {
803        self.as_slice() == other
804    }
805}
806
807impl PartialOrd<[u8]> for Bytes {
808    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
809        self.as_slice().partial_cmp(other)
810    }
811}
812
813impl PartialEq<Bytes> for [u8] {
814    fn eq(&self, other: &Bytes) -> bool {
815        *other == *self
816    }
817}
818
819impl PartialOrd<Bytes> for [u8] {
820    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
821        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
822    }
823}
824
825impl PartialEq<str> for Bytes {
826    fn eq(&self, other: &str) -> bool {
827        self.as_slice() == other.as_bytes()
828    }
829}
830
831impl PartialOrd<str> for Bytes {
832    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
833        self.as_slice().partial_cmp(other.as_bytes())
834    }
835}
836
837impl PartialEq<Bytes> for str {
838    fn eq(&self, other: &Bytes) -> bool {
839        *other == *self
840    }
841}
842
843impl PartialOrd<Bytes> for str {
844    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
845        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
846    }
847}
848
849impl PartialEq<Vec<u8>> for Bytes {
850    fn eq(&self, other: &Vec<u8>) -> bool {
851        *self == other[..]
852    }
853}
854
855impl PartialOrd<Vec<u8>> for Bytes {
856    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
857        self.as_slice().partial_cmp(&other[..])
858    }
859}
860
861impl PartialEq<Bytes> for Vec<u8> {
862    fn eq(&self, other: &Bytes) -> bool {
863        *other == *self
864    }
865}
866
867impl PartialOrd<Bytes> for Vec<u8> {
868    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
869        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
870    }
871}
872
873impl PartialEq<String> for Bytes {
874    fn eq(&self, other: &String) -> bool {
875        *self == other[..]
876    }
877}
878
879impl PartialOrd<String> for Bytes {
880    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
881        self.as_slice().partial_cmp(other.as_bytes())
882    }
883}
884
885impl PartialEq<Bytes> for String {
886    fn eq(&self, other: &Bytes) -> bool {
887        *other == *self
888    }
889}
890
891impl PartialOrd<Bytes> for String {
892    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
893        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
894    }
895}
896
897impl PartialEq<Bytes> for &[u8] {
898    fn eq(&self, other: &Bytes) -> bool {
899        *other == *self
900    }
901}
902
903impl PartialOrd<Bytes> for &[u8] {
904    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
905        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
906    }
907}
908
909impl PartialEq<Bytes> for &str {
910    fn eq(&self, other: &Bytes) -> bool {
911        *other == *self
912    }
913}
914
915impl PartialOrd<Bytes> for &str {
916    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
917        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
918    }
919}
920
921impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
922where
923    Bytes: PartialEq<T>,
924{
925    fn eq(&self, other: &&'a T) -> bool {
926        *self == **other
927    }
928}
929
930impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
931where
932    Bytes: PartialOrd<T>,
933{
934    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
935        self.partial_cmp(&**other)
936    }
937}
938
939// impl From
940
941impl Default for Bytes {
942    #[inline]
943    fn default() -> Bytes {
944        Bytes::new()
945    }
946}
947
948impl From<&'static [u8]> for Bytes {
949    fn from(slice: &'static [u8]) -> Bytes {
950        Bytes::from_static(slice)
951    }
952}
953
954impl From<&'static str> for Bytes {
955    fn from(slice: &'static str) -> Bytes {
956        Bytes::from_static(slice.as_bytes())
957    }
958}
959
960impl From<Vec<u8>> for Bytes {
961    fn from(vec: Vec<u8>) -> Bytes {
962        let mut vec = ManuallyDrop::new(vec);
963        let ptr = vec.as_mut_ptr();
964        let len = vec.len();
965        let cap = vec.capacity();
966
967        // Avoid an extra allocation if possible.
968        if len == cap {
969            let vec = ManuallyDrop::into_inner(vec);
970            return Bytes::from(vec.into_boxed_slice());
971        }
972
973        let shared = Box::new(Shared {
974            buf: ptr,
975            cap,
976            ref_cnt: AtomicUsize::new(1),
977        });
978
979        let shared = Box::into_raw(shared);
980        // The pointer should be aligned, so this assert should
981        // always succeed.
982        debug_assert!(
983            0 == (shared as usize & KIND_MASK),
984            "internal: Box<Shared> should have an aligned pointer",
985        );
986        Bytes {
987            ptr,
988            len,
989            data: AtomicPtr::new(shared as _),
990            vtable: &SHARED_VTABLE,
991        }
992    }
993}
994
995impl From<Box<[u8]>> for Bytes {
996    fn from(slice: Box<[u8]>) -> Bytes {
997        // Box<[u8]> doesn't contain a heap allocation for empty slices,
998        // so the pointer isn't aligned enough for the KIND_VEC stashing to
999        // work.
1000        if slice.is_empty() {
1001            return Bytes::new();
1002        }
1003
1004        let len = slice.len();
1005        let ptr = Box::into_raw(slice) as *mut u8;
1006
1007        if ptr as usize & 0x1 == 0 {
1008            let data = ptr_map(ptr, |addr| addr | KIND_VEC);
1009            Bytes {
1010                ptr,
1011                len,
1012                data: AtomicPtr::new(data.cast()),
1013                vtable: &PROMOTABLE_EVEN_VTABLE,
1014            }
1015        } else {
1016            Bytes {
1017                ptr,
1018                len,
1019                data: AtomicPtr::new(ptr.cast()),
1020                vtable: &PROMOTABLE_ODD_VTABLE,
1021            }
1022        }
1023    }
1024}
1025
1026impl From<Bytes> for BytesMut {
1027    /// Convert self into `BytesMut`.
1028    ///
1029    /// If `bytes` is unique for the entire original buffer, this will return a
1030    /// `BytesMut` with the contents of `bytes` without copying.
    /// If `bytes` is not unique for the entire original buffer, this will copy
    /// the subset of the original buffer that `bytes` refers to into a new `BytesMut`.
1033    ///
1034    /// # Examples
1035    ///
1036    /// ```
1037    /// use bytes::{Bytes, BytesMut};
1038    ///
1039    /// let bytes = Bytes::from(b"hello".to_vec());
1040    /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..]));
1041    /// ```
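    ///
    /// A sketch of the non-unique case, where the viewed subset is copied:
    ///
    /// ```
    /// use bytes::{Bytes, BytesMut};
    ///
    /// let a = Bytes::from(b"hello world".to_vec());
    /// let keep = a.clone();
    /// // `a` is not unique, so its contents are copied into fresh storage.
    /// assert_eq!(BytesMut::from(a), BytesMut::from(&b"hello world"[..]));
    /// assert_eq!(keep, b"hello world"[..]);
    /// ```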
1042    fn from(bytes: Bytes) -> Self {
1043        let bytes = ManuallyDrop::new(bytes);
1044        unsafe { (bytes.vtable.into_mut)(&bytes.data, bytes.ptr, bytes.len) }
1045    }
1046}
1047
1048impl From<String> for Bytes {
1049    fn from(s: String) -> Bytes {
1050        Bytes::from(s.into_bytes())
1051    }
1052}
1053
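/// Converts `Bytes` back into a `Vec<u8>` via the vtable's `into_vec`, which
/// reuses the allocation when it can and copies otherwise. A brief sketch:
///
/// ```
/// use bytes::Bytes;
///
/// let b = Bytes::from(b"hello".to_vec());
/// let v: Vec<u8> = b.into();
/// assert_eq!(v, b"hello");
/// ```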
1054impl From<Bytes> for Vec<u8> {
1055    fn from(bytes: Bytes) -> Vec<u8> {
1056        let bytes = ManuallyDrop::new(bytes);
1057        unsafe { (bytes.vtable.into_vec)(&bytes.data, bytes.ptr, bytes.len) }
1058    }
1059}
1060
1061// ===== impl Vtable =====
1062
1063impl fmt::Debug for Vtable {
1064    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1065        f.debug_struct("Vtable")
1066            .field("clone", &(self.clone as *const ()))
1067            .field("drop", &(self.drop as *const ()))
1068            .finish()
1069    }
1070}
1071
1072// ===== impl StaticVtable =====
1073
1074const STATIC_VTABLE: Vtable = Vtable {
1075    clone: static_clone,
1076    into_vec: static_to_vec,
1077    into_mut: static_to_mut,
1078    is_unique: static_is_unique,
1079    drop: static_drop,
1080};
1081
1082unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1083    let slice = slice::from_raw_parts(ptr, len);
1084    Bytes::from_static(slice)
1085}
1086
1087unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1088    let slice = slice::from_raw_parts(ptr, len);
1089    slice.to_vec()
1090}
1091
1092unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1093    let slice = slice::from_raw_parts(ptr, len);
1094    BytesMut::from(slice)
1095}
1096
1097fn static_is_unique(_: &AtomicPtr<()>) -> bool {
1098    false
1099}
1100
1101unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
1102    // nothing to drop for &'static [u8]
1103}
1104
1105// ===== impl OwnedVtable =====
1106
1107#[repr(C)]
1108struct Owned<T> {
1109    ref_cnt: AtomicUsize,
1110    owner: T,
1111}
1112
1113impl<T> Owned<T> {
1114    const VTABLE: Vtable = Vtable {
1115        clone: owned_clone::<T>,
1116        into_vec: owned_to_vec::<T>,
1117        into_mut: owned_to_mut::<T>,
1118        is_unique: owned_is_unique,
1119        drop: owned_drop::<T>,
1120    };
1121}
1122
1123unsafe fn owned_clone<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1124    let owned = data.load(Ordering::Relaxed);
1125    let old_cnt = (*owned.cast::<AtomicUsize>()).fetch_add(1, Ordering::Relaxed);
1126    if old_cnt > usize::MAX >> 1 {
1127        crate::abort();
1128    }
1129
1130    Bytes {
1131        ptr,
1132        len,
1133        data: AtomicPtr::new(owned as _),
1134        vtable: &Owned::<T>::VTABLE,
1135    }
1136}
1137
1138unsafe fn owned_to_vec<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1139    let slice = slice::from_raw_parts(ptr, len);
1140    let vec = slice.to_vec();
1141    owned_drop_impl::<T>(data.load(Ordering::Relaxed));
1142    vec
1143}
1144
1145unsafe fn owned_to_mut<T>(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1146    BytesMut::from_vec(owned_to_vec::<T>(data, ptr, len))
1147}
1148
1149unsafe fn owned_is_unique(_data: &AtomicPtr<()>) -> bool {
1150    false
1151}
1152
1153unsafe fn owned_drop_impl<T>(owned: *mut ()) {
1154    {
1155        let ref_cnt = &*owned.cast::<AtomicUsize>();
1156
1157        let old_cnt = ref_cnt.fetch_sub(1, Ordering::Release);
1158        debug_assert!(
1159            old_cnt > 0 && old_cnt <= usize::MAX >> 1,
1160            "expected non-zero refcount and no underflow"
1161        );
1162        if old_cnt != 1 {
1163            return;
1164        }
1165        ref_cnt.load(Ordering::Acquire);
1166    }
1167
1168    drop(Box::<Owned<T>>::from_raw(owned.cast()));
1169}
1170
1171unsafe fn owned_drop<T>(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1172    let owned = data.load(Ordering::Relaxed);
1173    owned_drop_impl::<T>(owned);
1174}
1175
1176// ===== impl PromotableVtable =====
1177
1178static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
1179    clone: promotable_even_clone,
1180    into_vec: promotable_even_to_vec,
1181    into_mut: promotable_even_to_mut,
1182    is_unique: promotable_is_unique,
1183    drop: promotable_even_drop,
1184};
1185
1186static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
1187    clone: promotable_odd_clone,
1188    into_vec: promotable_odd_to_vec,
1189    into_mut: promotable_odd_to_mut,
1190    is_unique: promotable_is_unique,
1191    drop: promotable_odd_drop,
1192};
1193
1194unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1195    let shared = data.load(Ordering::Acquire);
1196    let kind = shared as usize & KIND_MASK;
1197
1198    if kind == KIND_ARC {
1199        shallow_clone_arc(shared.cast(), ptr, len)
1200    } else {
1201        debug_assert_eq!(kind, KIND_VEC);
1202        let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1203        shallow_clone_vec(data, shared, buf, ptr, len)
1204    }
1205}
1206
1207unsafe fn promotable_to_vec(
1208    data: &AtomicPtr<()>,
1209    ptr: *const u8,
1210    len: usize,
1211    f: fn(*mut ()) -> *mut u8,
1212) -> Vec<u8> {
1213    let shared = data.load(Ordering::Acquire);
1214    let kind = shared as usize & KIND_MASK;
1215
1216    if kind == KIND_ARC {
1217        shared_to_vec_impl(shared.cast(), ptr, len)
1218    } else {
        // For KIND_VEC the view may start at a nonzero offset, but it always
        // ends at the end of the original buffer (truncation promotes to
        // KIND_ARC first), so `offset + len` recovers the capacity. Copy the
        // view back to the start of the buffer before rebuilding the Vec.
1220        debug_assert_eq!(kind, KIND_VEC);
1221
1222        let buf = f(shared);
1223
1224        let cap = ptr.offset_from(buf) as usize + len;
1225
1226        // Copy back buffer
1227        ptr::copy(ptr, buf, len);
1228
1229        Vec::from_raw_parts(buf, len, cap)
1230    }
1231}
1232
1233unsafe fn promotable_to_mut(
1234    data: &AtomicPtr<()>,
1235    ptr: *const u8,
1236    len: usize,
1237    f: fn(*mut ()) -> *mut u8,
1238) -> BytesMut {
1239    let shared = data.load(Ordering::Acquire);
1240    let kind = shared as usize & KIND_MASK;
1241
1242    if kind == KIND_ARC {
1243        shared_to_mut_impl(shared.cast(), ptr, len)
1244    } else {
1245        // KIND_VEC is a view of an underlying buffer at a certain offset.
1246        // The ptr + len always represents the end of that buffer.
1247        // Before truncating it, it is first promoted to KIND_ARC.
1248        // Thus, we can safely reconstruct a Vec from it without leaking memory.
1249        debug_assert_eq!(kind, KIND_VEC);
1250
1251        let buf = f(shared);
1252        let off = ptr.offset_from(buf) as usize;
1253        let cap = off + len;
1254        let v = Vec::from_raw_parts(buf, cap, cap);
1255
1256        let mut b = BytesMut::from_vec(v);
1257        b.advance_unchecked(off);
1258        b
1259    }
1260}
1261
1262unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1263    promotable_to_vec(data, ptr, len, |shared| {
1264        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1265    })
1266}
1267
1268unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1269    promotable_to_mut(data, ptr, len, |shared| {
1270        ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
1271    })
1272}
1273
1274unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1275    data.with_mut(|shared| {
1276        let shared = *shared;
1277        let kind = shared as usize & KIND_MASK;
1278
1279        if kind == KIND_ARC {
1280            release_shared(shared.cast());
1281        } else {
1282            debug_assert_eq!(kind, KIND_VEC);
1283            let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
1284            free_boxed_slice(buf, ptr, len);
1285        }
1286    });
1287}
1288
1289unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1290    let shared = data.load(Ordering::Acquire);
1291    let kind = shared as usize & KIND_MASK;
1292
1293    if kind == KIND_ARC {
1294        shallow_clone_arc(shared as _, ptr, len)
1295    } else {
1296        debug_assert_eq!(kind, KIND_VEC);
1297        shallow_clone_vec(data, shared, shared.cast(), ptr, len)
1298    }
1299}
1300
1301unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1302    promotable_to_vec(data, ptr, len, |shared| shared.cast())
1303}
1304
1305unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1306    promotable_to_mut(data, ptr, len, |shared| shared.cast())
1307}
1308
1309unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
1310    data.with_mut(|shared| {
1311        let shared = *shared;
1312        let kind = shared as usize & KIND_MASK;
1313
1314        if kind == KIND_ARC {
1315            release_shared(shared.cast());
1316        } else {
1317            debug_assert_eq!(kind, KIND_VEC);
1318
1319            free_boxed_slice(shared.cast(), ptr, len);
1320        }
1321    });
1322}
1323
1324unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool {
1325    let shared = data.load(Ordering::Acquire);
1326    let kind = shared as usize & KIND_MASK;
1327
1328    if kind == KIND_ARC {
1329        let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1330        ref_cnt == 1
1331    } else {
1332        true
1333    }
1334}
1335
1336unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
1337    let cap = offset.offset_from(buf) as usize + len;
1338    dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
1339}
1340
1341// ===== impl SharedVtable =====
1342
1343struct Shared {
1344    // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
1345    buf: *mut u8,
1346    cap: usize,
1347    ref_cnt: AtomicUsize,
1348}
1349
1350impl Drop for Shared {
1351    fn drop(&mut self) {
1352        unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
1353    }
1354}
1355
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the pointer of an
// allocated `Shared` object to implicitly carry the `KIND_ARC` flag in its
// low bit: the flag is set when the LSB is 0.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
1361
1362static SHARED_VTABLE: Vtable = Vtable {
1363    clone: shared_clone,
1364    into_vec: shared_to_vec,
1365    into_mut: shared_to_mut,
1366    is_unique: shared_is_unique,
1367    drop: shared_drop,
1368};
1369
1370const KIND_ARC: usize = 0b0;
1371const KIND_VEC: usize = 0b1;
1372const KIND_MASK: usize = 0b1;
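
// A sketch of how these tags are used (mirroring the `promotable_*` and
// `shared_*` vtable code in this file): the low bit of the stashed `data`
// pointer records which representation is currently in use.
//
//     let kind = shared as usize & KIND_MASK; // KIND_ARC (0) or KIND_VEC (1)
//     // In the "even" promotable repr the buffer pointer is recovered with
//     // ptr_map(shared.cast(), |addr| addr & !KIND_MASK).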
1373
1374unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1375    let shared = data.load(Ordering::Relaxed);
1376    shallow_clone_arc(shared as _, ptr, len)
1377}
1378
1379unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
1380    // Check that the ref_cnt is 1 (unique).
1381    //
    // If it is unique, then it is set to 0 with an AcqRel fence for the same
    // reason as in `release_shared`.
1384    //
1385    // Otherwise, we take the other branch and call release_shared.
1386    if (*shared)
1387        .ref_cnt
1388        .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
1389        .is_ok()
1390    {
1391        // Deallocate the `Shared` instance without running its destructor.
1392        let shared = *Box::from_raw(shared);
1393        let shared = ManuallyDrop::new(shared);
1394        let buf = shared.buf;
1395        let cap = shared.cap;
1396
1397        // Copy back buffer
1398        ptr::copy(ptr, buf, len);
1399
1400        Vec::from_raw_parts(buf, len, cap)
1401    } else {
1402        let v = slice::from_raw_parts(ptr, len).to_vec();
1403        release_shared(shared);
1404        v
1405    }
1406}
1407
1408unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1409    shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1410}
1411
1412unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut {
1413    // The goal is to check if the current handle is the only handle
1414    // that currently has access to the buffer. This is done by
1415    // checking if the `ref_cnt` is currently 1.
1416    //
1417    // The `Acquire` ordering synchronizes with the `Release` as
1418    // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1419    // operation guarantees that any mutations done in other threads
1420    // are ordered before the `ref_cnt` is decremented. As such,
1421    // this `Acquire` will guarantee that those mutations are
1422    // visible to the current thread.
1423    //
1424    // Otherwise, we take the other branch, copy the data and call `release_shared`.
1425    if (*shared).ref_cnt.load(Ordering::Acquire) == 1 {
1426        // Deallocate the `Shared` instance without running its destructor.
1427        let shared = *Box::from_raw(shared);
1428        let shared = ManuallyDrop::new(shared);
1429        let buf = shared.buf;
1430        let cap = shared.cap;
1431
1432        // Rebuild Vec
1433        let off = ptr.offset_from(buf) as usize;
1434        let v = Vec::from_raw_parts(buf, len + off, cap);
1435
1436        let mut b = BytesMut::from_vec(v);
1437        b.advance_unchecked(off);
1438        b
1439    } else {
1440        // Copy the data from Shared in a new Vec, then release it
1441        let v = slice::from_raw_parts(ptr, len).to_vec();
1442        release_shared(shared);
1443        BytesMut::from_vec(v)
1444    }
1445}
1446
1447unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1448    shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
1449}
1450
1451pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool {
1452    let shared = data.load(Ordering::Acquire);
1453    let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed);
1454    ref_cnt == 1
1455}
1456
1457unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1458    data.with_mut(|shared| {
1459        release_shared(shared.cast());
1460    });
1461}
1462
1463unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
1464    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);
1465
1466    if old_size > usize::MAX >> 1 {
1467        crate::abort();
1468    }
1469
1470    Bytes {
1471        ptr,
1472        len,
1473        data: AtomicPtr::new(shared as _),
1474        vtable: &SHARED_VTABLE,
1475    }
1476}
1477
1478#[cold]
1479unsafe fn shallow_clone_vec(
1480    atom: &AtomicPtr<()>,
1481    ptr: *const (),
1482    buf: *mut u8,
1483    offset: *const u8,
1484    len: usize,
1485) -> Bytes {
    // If the buffer is still tracked in a `Vec<u8>`, it is time to
    // promote the vec to an `Arc`. This could potentially be called
    // concurrently, so some care must be taken.
1489
1490    // First, allocate a new `Shared` instance containing the
1491    // `Vec` fields. It's important to note that `ptr`, `len`,
1492    // and `cap` cannot be mutated without having `&mut self`.
1493    // This means that these fields will not be concurrently
1494    // updated and since the buffer hasn't been promoted to an
1495    // `Arc`, those three fields still are the components of the
1496    // vector.
1497    let shared = Box::new(Shared {
1498        buf,
1499        cap: offset.offset_from(buf) as usize + len,
1500        // Initialize refcount to 2. One for this reference, and one
1501        // for the new clone that will be returned from
1502        // `shallow_clone`.
1503        ref_cnt: AtomicUsize::new(2),
1504    });
1505
1506    let shared = Box::into_raw(shared);
1507
1508    // The pointer should be aligned, so this assert should
1509    // always succeed.
1510    debug_assert!(
1511        0 == (shared as usize & KIND_MASK),
1512        "internal: Box<Shared> should have an aligned pointer",
1513    );
1514
    // Try compare & swapping the pointer into the `arc` field.
    // `Release` is used to synchronize with other threads that
    // will load the `arc` field.
1518    //
1519    // If the `compare_exchange` fails, then the thread lost the
1520    // race to promote the buffer to shared. The `Acquire`
1521    // ordering will synchronize with the `compare_exchange`
1522    // that happened in the other thread and the `Shared`
1523    // pointed to by `actual` will be visible.
1524    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
1525        Ok(actual) => {
1526            debug_assert!(core::ptr::eq(actual, ptr));
1527            // The upgrade was successful, the new handle can be
1528            // returned.
1529            Bytes {
1530                ptr: offset,
1531                len,
1532                data: AtomicPtr::new(shared as _),
1533                vtable: &SHARED_VTABLE,
1534            }
1535        }
1536        Err(actual) => {
1537            // The upgrade failed, a concurrent clone happened. Release
1538            // the allocation that was made in this thread, it will not
1539            // be needed.
1540            let shared = Box::from_raw(shared);
1541            mem::forget(*shared);
1542
1543            // Buffer already promoted to shared storage, so increment ref
1544            // count.
1545            shallow_clone_arc(actual as _, offset, len)
1546        }
1547    }
1548}
1549
1550unsafe fn release_shared(ptr: *mut Shared) {
1551    // `Shared` storage... follow the drop steps from Arc.
1552    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
1553        return;
1554    }
1555
1556    // This fence is needed to prevent reordering of use of the data and
1557    // deletion of the data.  Because it is marked `Release`, the decreasing
1558    // of the reference count synchronizes with this `Acquire` fence. This
1559    // means that use of the data happens before decreasing the reference
1560    // count, which happens before this fence, which happens before the
1561    // deletion of the data.
1562    //
1563    // As explained in the [Boost documentation][1],
1564    //
1565    // > It is important to enforce any possible access to the object in one
1566    // > thread (through an existing reference) to *happen before* deleting
1567    // > the object in a different thread. This is achieved by a "release"
1568    // > operation after dropping a reference (any access to the object
1569    // > through this reference must obviously happened before), and an
1570    // > "acquire" operation before deleting the object.
1571    //
1572    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1573    //
1574    // Thread sanitizer does not support atomic fences. Use an atomic load
1575    // instead.
1576    (*ptr).ref_cnt.load(Ordering::Acquire);
1577
1578    // Drop the data
1579    drop(Box::from_raw(ptr));
1580}
1581
1582// Ideally we would always use this version of `ptr_map` since it is strict
1583// provenance compatible, but it results in worse codegen. We will however still
1584// use it on miri because it gives better diagnostics for people who test bytes
1585// code with miri.
1586//
1587// See https://github.com/tokio-rs/bytes/pull/545 for more info.
1588#[cfg(miri)]
1589fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1590where
1591    F: FnOnce(usize) -> usize,
1592{
1593    let old_addr = ptr as usize;
1594    let new_addr = f(old_addr);
1595    let diff = new_addr.wrapping_sub(old_addr);
1596    ptr.wrapping_add(diff)
1597}
1598
1599#[cfg(not(miri))]
1600fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
1601where
1602    F: FnOnce(usize) -> usize,
1603{
1604    let old_addr = ptr as usize;
1605    let new_addr = f(old_addr);
1606    new_addr as *mut u8
1607}
1608
1609fn without_provenance(ptr: usize) -> *const u8 {
1610    core::ptr::null::<u8>().wrapping_add(ptr)
1611}
1612
1613// compile-fails
1614
1615/// ```compile_fail
1616/// use bytes::Bytes;
1617/// #[deny(unused_must_use)]
1618/// {
1619///     let mut b1 = Bytes::from("hello world");
1620///     b1.split_to(6);
1621/// }
1622/// ```
1623fn _split_to_must_use() {}
1624
1625/// ```compile_fail
1626/// use bytes::Bytes;
1627/// #[deny(unused_must_use)]
1628/// {
1629///     let mut b1 = Bytes::from("hello world");
1630///     b1.split_off(6);
1631/// }
1632/// ```
1633fn _split_off_must_use() {}
1634
1635// fuzz tests
1636#[cfg(all(test, loom))]
1637mod fuzz {
1638    use loom::sync::Arc;
1639    use loom::thread;
1640
1641    use super::Bytes;
1642    #[test]
1643    fn bytes_cloning_vec() {
1644        loom::model(|| {
1645            let a = Bytes::from(b"abcdefgh".to_vec());
1646            let addr = a.as_ptr() as usize;
1647
1648            // test the Bytes::clone is Sync by putting it in an Arc
1649            let a1 = Arc::new(a);
1650            let a2 = a1.clone();
1651
1652            let t1 = thread::spawn(move || {
1653                let b: Bytes = (*a1).clone();
1654                assert_eq!(b.as_ptr() as usize, addr);
1655            });
1656
1657            let t2 = thread::spawn(move || {
1658                let b: Bytes = (*a2).clone();
1659                assert_eq!(b.as_ptr() as usize, addr);
1660            });
1661
1662            t1.join().unwrap();
1663            t2.join().unwrap();
1664        });
1665    }
1666}