// tiny_artnet_bytes_no_atomic/bytes.rs

use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};

use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};

use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;

/// A cheaply cloneable and sliceable chunk of contiguous memory.
///
/// `Bytes` is an efficient container for storing and operating on contiguous
/// slices of memory. It is intended for use primarily in networking code, but
/// could have applications elsewhere as well.
///
/// `Bytes` values facilitate zero-copy network programming by allowing multiple
/// `Bytes` objects to point to the same underlying memory.
///
/// `Bytes` does not have a single implementation. It is an interface, whose
/// exact behavior is implemented through dynamic dispatch in several underlying
/// implementations of `Bytes`.
///
/// All `Bytes` implementations must fulfill the following requirements:
/// - They are cheaply cloneable and thereby shareable between an unlimited number
///   of components, for example by modifying a reference count.
/// - Instances can be sliced to refer to a subset of the original buffer.
///
/// ```
/// use bytes::Bytes;
///
/// let mut mem = Bytes::from("Hello world");
/// let a = mem.slice(0..5);
///
/// assert_eq!(a, "Hello");
///
/// let b = mem.split_to(6);
///
/// assert_eq!(mem, "world");
/// assert_eq!(b, "Hello ");
/// ```
///
/// # Memory layout
///
/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used
/// to track information about which segment of the underlying memory the
/// `Bytes` handle has access to.
///
/// `Bytes` keeps both a pointer to the shared state containing the full memory
/// slice and a pointer to the start of the region visible by the handle.
/// `Bytes` also tracks the length of its view into the memory.
///
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it between multiple
/// `Bytes` instances.
///
/// For `Bytes` implementations which refer to constant memory (e.g. created
/// via `Bytes::from_static()`) the cloning implementation will be a no-op.
///
/// For `Bytes` implementations which point to a reference counted shared storage
/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
/// reference count.
///
/// Due to this mechanism, multiple `Bytes` instances may point to the same
/// shared memory region.
/// Each `Bytes` instance can point to different sections within that
/// memory region, and `Bytes` instances may or may not have overlapping views
/// into the memory.
///
/// The following diagram visualizes a scenario where 2 `Bytes` instances make
/// use of an `Arc`-based backing storage, and provide access to different views:
///
/// ```text
///
///    Arc ptrs                   +---------+
///    ________________________ / | Bytes 2 |
///   /                           +---------+
///  /          +-----------+     |         |
/// |_________/ |  Bytes 1  |     |         |
/// |           +-----------+     |         |
/// |           |           | ___/ data     | tail
/// |      data |      tail |/              |
/// v           v           v               v
/// +-----+---------------------------------+-----+
/// | Arc |     |           |               |     |
/// +-----+---------------------------------+-----+
/// ```
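///
/// For example, cloning a `Bytes` backed by heap storage shares the buffer
/// rather than copying it:
///
/// ```
/// use bytes::Bytes;
///
/// let a = Bytes::from(b"hello world".to_vec());
/// let b = a.clone();
///
/// // Both handles view the same underlying memory.
/// assert_eq!(a, b);
/// assert_eq!(a.as_ptr(), b.as_ptr());
/// ```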
pub struct Bytes {
    ptr: *const u8,
    len: usize,
    // inlined "trait object"
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
}

pub(crate) struct Vtable {
    /// fn(data, ptr, len)
    pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
    /// fn(data, ptr, len)
    pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
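
// The `Clone` and `Drop` impls for `Bytes` further down dispatch through this
// table, so each backing storage (static slice, promotable `Vec`, or the
// reference-counted `Shared`) only has to supply its own `clone` and `drop`
// functions.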

impl Bytes {
    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn new() -> Bytes {
        // Make it a named const to work around
        // "unsizing casts are not allowed in const fn"
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    #[cfg(all(loom, test))]
    pub fn new() -> Bytes {
        const EMPTY: &[u8] = &[];
        Bytes::from_static(EMPTY)
    }

    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` will point directly to the static slice. There is
    /// no allocating or copying.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    #[inline]
    #[cfg(not(all(loom, test)))]
    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    #[cfg(all(loom, test))]
    pub fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            ptr: bytes.as_ptr(),
            len: bytes.len(),
            data: AtomicPtr::new(ptr::null_mut()),
            vtable: &STATIC_VTABLE,
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Creates a `Bytes` instance from a slice by copying it.
    pub fn copy_from_slice(data: &[u8]) -> Self {
        data.to_vec().into()
    }

    /// Returns a slice of self for the provided range.
    ///
    /// This will increment the reference count for the underlying memory and
    /// return a new `Bytes` handle set to the slice.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
    /// will panic.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        if end == begin {
            return Bytes::new();
        }

        let mut ret = self.clone();

        ret.len = end - begin;
        ret.ptr = unsafe { ret.ptr.offset(begin as isize) };

        ret
    }

    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// When processing a `Bytes` buffer with other tools, one often gets a
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
    /// called `self.slice()` with the offsets that correspond to `subset`.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
        // Empty slice and empty Bytes may have their pointers reset
        // so explicitly allow empty slice to be a subslice of any slice.
        if subset.is_empty() {
            return Bytes::new();
        }

        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(
            sub_p >= bytes_p,
            "subset pointer ({:p}) is smaller than self pointer ({:p})",
            sub_p as *const u8,
            bytes_p as *const u8,
        );
        assert!(
            sub_p + sub_len <= bytes_p + bytes_len,
            "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
            bytes_p as *const u8,
            bytes_len,
            sub_p as *const u8,
            sub_len,
        );

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
    /// contains elements `[at, len)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            return mem::replace(self, Bytes::new());
        }

        let mut ret = self.clone();

        self.len = at;

        unsafe { ret.inc_start(at) };

        ret
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    #[must_use = "consider Bytes::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> Bytes {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            return Bytes::new();
        }

        let mut ret = self.clone();

        unsafe { self.inc_start(at) };

        ret.len = at;
        ret
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        if len < self.len {
            // The Vec "promotable" vtables do not store the capacity,
            // so we cannot truncate while using this repr. We *have* to
            // promote using `split_off` so the capacity can be stored.
            if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
                || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
            {
                drop(self.split_off(len));
            } else {
                self.len = len;
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    #[inline]
    pub(crate) unsafe fn with_vtable(
        ptr: *const u8,
        len: usize,
        data: AtomicPtr<()>,
        vtable: &'static Vtable,
    ) -> Bytes {
        Bytes {
            ptr,
            len,
            data,
            vtable,
        }
    }

    // private

    #[inline]
    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    #[inline]
    unsafe fn inc_start(&mut self, by: usize) {
        // should already be asserted, but debug assert for tests
        debug_assert!(self.len >= by, "internal: inc_start out of bounds");
        self.len -= by;
        self.ptr = self.ptr.offset(by as isize);
    }
}

// Vtable must enforce this behavior
unsafe impl Send for Bytes {}
unsafe impl Sync for Bytes {}

impl Drop for Bytes {
    #[inline]
    fn drop(&mut self) {
        unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
    }
}

impl Clone for Bytes {
    #[inline]
    fn clone(&self) -> Bytes {
        unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.as_slice()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.len(),
            "cannot advance past `remaining`: {:?} <= {:?}",
            cnt,
            self.len(),
        );

        unsafe {
            self.inc_start(cnt);
        }
    }

    fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
        if len == self.remaining() {
            core::mem::replace(self, Bytes::new())
        } else {
            let ret = self.slice(..len);
            self.advance(len);
            ret
        }
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_slice().hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_slice()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = core::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().into_iter()
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        Vec::from_iter(into_iter).into()
    }
}

// impl Eq

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}

impl Eq for Bytes {}

impl PartialEq<[u8]> for Bytes {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_slice() == other
    }
}

impl PartialOrd<[u8]> for Bytes {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other)
    }
}

impl PartialEq<Bytes> for [u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for [u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for Bytes {
    fn eq(&self, other: &str) -> bool {
        self.as_slice() == other.as_bytes()
    }
}

impl PartialOrd<str> for Bytes {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for Bytes {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<Vec<u8>> for Bytes {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(&other[..])
    }
}

impl PartialEq<Bytes> for Vec<u8> {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for Vec<u8> {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for Bytes {
    fn eq(&self, other: &String) -> bool {
        *self == &other[..]
    }
}

impl PartialOrd<String> for Bytes {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_slice().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<Bytes> for String {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for String {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Bytes> for &[u8] {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &[u8] {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<Bytes> for &str {
    fn eq(&self, other: &Bytes) -> bool {
        *other == *self
    }
}

impl PartialOrd<Bytes> for &str {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where
    Bytes: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where
    Bytes: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}

// impl From

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(slice: &'static [u8]) -> Bytes {
        Bytes::from_static(slice)
    }
}

impl From<&'static str> for Bytes {
    fn from(slice: &'static str) -> Bytes {
        Bytes::from_static(slice.as_bytes())
    }
}

impl From<Vec<u8>> for Bytes {
    fn from(vec: Vec<u8>) -> Bytes {
        // into_boxed_slice doesn't return a heap allocation for empty vectors,
        // so the pointer isn't aligned enough for the KIND_VEC stashing to
        // work.
        if vec.is_empty() {
            return Bytes::new();
        }

        let slice = vec.into_boxed_slice();
        let len = slice.len();
        let ptr = Box::into_raw(slice) as *mut u8;

        // An even buffer pointer leaves the low bit free to carry the KIND_VEC
        // tag; an odd pointer already has its low bit set, so it is stored
        // untagged and handled by the "odd" vtable instead.
        if ptr as usize & 0x1 == 0 {
            let data = ptr as usize | KIND_VEC;
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(data as *mut _),
                vtable: &PROMOTABLE_EVEN_VTABLE,
            }
        } else {
            Bytes {
                ptr,
                len,
                data: AtomicPtr::new(ptr as *mut _),
                vtable: &PROMOTABLE_ODD_VTABLE,
            }
        }
    }
}

impl From<String> for Bytes {
    fn from(s: String) -> Bytes {
        Bytes::from(s.into_bytes())
    }
}

// ===== impl Vtable =====

impl fmt::Debug for Vtable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Vtable")
            .field("clone", &(self.clone as *const ()))
            .field("drop", &(self.drop as *const ()))
            .finish()
    }
}

// ===== impl StaticVtable =====

const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    drop: static_drop,
};

unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let slice = slice::from_raw_parts(ptr, len);
    Bytes::from_static(slice)
}

unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
}

// ===== impl PromotableVtable =====

static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
    clone: promotable_even_clone,
    drop: promotable_even_drop,
};

static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
    clone: promotable_odd_clone,
    drop: promotable_odd_drop,
};

unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        let buf = (shared as usize & !KIND_MASK) as *mut u8;
        shallow_clone_vec(data, shared, buf, ptr, len)
    }
}

unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared as *mut Shared);
        } else {
            debug_assert_eq!(kind, KIND_VEC);
            let buf = (shared as usize & !KIND_MASK) as *mut u8;
            drop(rebuild_boxed_slice(buf, ptr, len));
        }
    });
}

unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Acquire);
    let kind = shared as usize & KIND_MASK;

    if kind == KIND_ARC {
        shallow_clone_arc(shared as _, ptr, len)
    } else {
        debug_assert_eq!(kind, KIND_VEC);
        shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
    }
}

unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
    data.with_mut(|shared| {
        let shared = *shared;
        let kind = shared as usize & KIND_MASK;

        if kind == KIND_ARC {
            release_shared(shared as *mut Shared);
        } else {
            debug_assert_eq!(kind, KIND_VEC);

            drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
        }
    });
}

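// Reconstructs the boxed slice that originally backed a vec-kind `Bytes`:
// `buf` is the start of the allocation and `offset` is where the handle's
// view currently begins, so the number of bytes to reclaim is the
// advanced-past prefix plus the `len` bytes still in view.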
unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
    let cap = (offset as usize - buf as usize) + len;
    Box::from_raw(slice::from_raw_parts_mut(buf, cap))
}

// ===== impl SharedVtable =====

struct Shared {
    // Holds the vec so it can be dropped, but otherwise doesn't access it.
    _vec: Vec<u8>,
    ref_cnt: AtomicUsize,
}

// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant, since we depend on the pointer to an
// allocated `Shared` object implicitly carrying the `KIND_ARC` flag:
// that flag is set when the LSB is 0.
const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];

static SHARED_VTABLE: Vtable = Vtable {
    clone: shared_clone,
    drop: shared_drop,
};

const KIND_ARC: usize = 0b0;
const KIND_VEC: usize = 0b1;
const KIND_MASK: usize = 0b1;
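
// The kind lives in the least significant bit of the `data` pointer: a set
// bit (KIND_VEC) means `data` still holds the original buffer pointer
// (possibly tagged), while a cleared bit (KIND_ARC) means `data` points to a
// `Shared`. This relies on the alignment assertion above.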

unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
    let shared = data.load(Ordering::Relaxed);
    shallow_clone_arc(shared as _, ptr, len)
}

unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
    data.with_mut(|shared| {
        release_shared(*shared as *mut Shared);
    });
}

unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
    let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed);

    if old_size > usize::MAX >> 1 {
        crate::abort();
    }

    Bytes {
        ptr,
        len,
        data: AtomicPtr::new(shared as _),
        vtable: &SHARED_VTABLE,
    }
}

#[cold]
unsafe fn shallow_clone_vec(
    atom: &AtomicPtr<()>,
    ptr: *const (),
    buf: *mut u8,
    offset: *const u8,
    len: usize,
) -> Bytes {
    // The buffer is still tracked in a `Vec<u8>`, so it is time to promote
    // the vec to an `Arc`. This could potentially be called concurrently, so
    // some care must be taken.

    // First, allocate a new `Shared` instance containing the
    // `Vec` fields. It's important to note that `ptr`, `len`,
    // and `cap` cannot be mutated without having `&mut self`.
    // This means that these fields will not be concurrently
    // updated and since the buffer hasn't been promoted to an
    // `Arc`, those three fields still are the components of the
    // vector.
    let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
    let shared = Box::new(Shared {
        _vec: vec,
        // Initialize refcount to 2. One for this reference, and one
        // for the new clone that will be returned from
        // `shallow_clone`.
        ref_cnt: AtomicUsize::new(2),
    });

    let shared = Box::into_raw(shared);

    // The pointer should be aligned, so this assert should
    // always succeed.
    debug_assert!(
        0 == (shared as usize & KIND_MASK),
        "internal: Box<Shared> should have an aligned pointer",
    );

    // Try compare & swapping the pointer into the `arc` field.
    // `Release` is used to synchronize with other threads that
    // will load the `arc` field.
    //
    // If the `compare_exchange` fails, then the thread lost the
    // race to promote the buffer to shared. The `Acquire`
    // ordering will synchronize with the `compare_exchange`
    // that happened in the other thread and the `Shared`
    // pointed to by `actual` will be visible.
    match atom.compare_exchange(ptr as _, shared as _, Ordering::AcqRel, Ordering::Acquire) {
        Ok(actual) => {
            debug_assert!(actual as usize == ptr as usize);
            // The upgrade was successful, the new handle can be
            // returned.
            Bytes {
                ptr: offset,
                len,
                data: AtomicPtr::new(shared as _),
                vtable: &SHARED_VTABLE,
            }
        }
        Err(actual) => {
            // The upgrade failed, a concurrent clone happened. Release
            // the allocation that was made in this thread, it will not
            // be needed.
            let shared = Box::from_raw(shared);
            mem::forget(*shared);

            // Buffer already promoted to shared storage, so increment ref
            // count.
            shallow_clone_arc(actual as _, offset, len)
        }
    }
}

unsafe fn release_shared(ptr: *mut Shared) {
    // `Shared` storage... follow the drop steps from Arc.
    if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data. Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    atomic::fence(Ordering::Acquire);

    // Drop the data.
    drop(Box::from_raw(ptr));
}

// compile-fails

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_to(6);
/// }
/// ```
fn _split_to_must_use() {}

/// ```compile_fail
/// use bytes::Bytes;
/// #[deny(unused_must_use)]
/// {
///     let mut b1 = Bytes::from("hello world");
///     b1.split_off(6);
/// }
/// ```
fn _split_off_must_use() {}

// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
    use loom::sync::Arc;
    use loom::thread;

    use super::Bytes;
    #[test]
    fn bytes_cloning_vec() {
        loom::model(|| {
            let a = Bytes::from(b"abcdefgh".to_vec());
            let addr = a.as_ptr() as usize;

            // test the Bytes::clone is Sync by putting it in an Arc
            let a1 = Arc::new(a);
            let a2 = a1.clone();

            let t1 = thread::spawn(move || {
                let b: Bytes = (*a1).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            let t2 = thread::spawn(move || {
                let b: Bytes = (*a2).clone();
                assert_eq!(b.as_ptr() as usize, addr);
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}