// ntex_bytes/bytes.rs

use std::borrow::{Borrow, BorrowMut};
use std::ops::{Deref, DerefMut, RangeBounds};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::sync::atomic::{self, AtomicUsize};
use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice};

use crate::pool::{PoolId, PoolRef};
use crate::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut};
9
10/// A reference counted contiguous slice of memory.
11///
12/// `Bytes` is an efficient container for storing and operating on contiguous
13/// slices of memory. It is intended for use primarily in networking code, but
14/// could have applications elsewhere as well.
15///
16/// `Bytes` values facilitate zero-copy network programming by allowing multiple
17/// `Bytes` objects to point to the same underlying memory. This is managed by
18/// using a reference count to track when the memory is no longer needed and can
19/// be freed.
20///
21/// ```
22/// use ntex_bytes::Bytes;
23///
24/// let mut mem = Bytes::from(&b"Hello world"[..]);
25/// let a = mem.slice(0..5);
26///
27/// assert_eq!(a, b"Hello");
28///
29/// let b = mem.split_to(6);
30///
31/// assert_eq!(mem, b"world");
32/// assert_eq!(b, b"Hello ");
33/// ```
34///
35/// # Memory layout
36///
37/// The `Bytes` struct itself is fairly small, limited to a pointer to the
38/// memory and 4 `usize` fields used to track information about which segment of
39/// the underlying memory the `Bytes` handle has access to.
40///
41/// The memory layout looks like this:
42///
43/// ```text
44/// +-------+
45/// | Bytes |
46/// +-------+
47///  /      \_____
48/// |              \
49/// v               v
50/// +-----+------------------------------------+
51/// | Arc |         |      Data     |          |
52/// +-----+------------------------------------+
53/// ```
54///
55/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
56/// slice and a pointer to the start of the region visible by the handle.
57/// `Bytes` also tracks the length of its view into the memory.
58///
59/// # Sharing
60///
/// The memory itself is reference counted, and multiple `Bytes` objects may
/// point to the same region. Each `Bytes` handle may point to a different
/// section within the memory region, and handles may or may not have
/// overlapping views into the memory.
65///
66///
67/// ```text
68///
69///    Arc ptrs                   +---------+
70///    ________________________ / | Bytes 2 |
71///   /                           +---------+
72///  /          +-----------+     |         |
73/// |_________/ |  Bytes 1  |     |         |
74/// |           +-----------+     |         |
75/// |           |           | ___/ data     | tail
76/// |      data |      tail |/              |
77/// v           v           v               v
78/// +-----+---------------------------------+-----+
79/// | Arc |     |           |               |     |
80/// +-----+---------------------------------+-----+
81/// ```
82///
83/// # Mutating
84///
85/// While `Bytes` handles may potentially represent overlapping views of the
86/// underlying memory slice and may not be mutated, `BytesMut` handles are
87/// guaranteed to be the only handle able to view that slice of memory. As such,
88/// `BytesMut` handles are able to mutate the underlying memory. Note that
89/// holding a unique view to a region of memory does not mean that there are no
90/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
91/// memory.
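///
/// For example, splitting a `BytesMut` yields two handles with disjoint views
/// of the same underlying buffer, and each view can be mutated independently:
///
/// ```
/// use ntex_bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello world"[..]);
/// let mut head = buf.split_to(6);
///
/// head[0] = b'H';
/// buf[0] = b'W';
///
/// assert_eq!(&head[..], b"Hello ");
/// assert_eq!(&buf[..], b"World");
/// ```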
92///
93/// # Inline bytes
94///
/// As an optimization, when the slice referenced by a `Bytes` handle is small
/// enough [^1], the data is stored inline in the handle itself rather than on
/// the heap. In this case, a clone is no longer "shallow" and the data will be
/// copied. Converting from a small `Vec` or `String` also uses inlining.
/// `BytesMut` does not support data inlining and always allocates, but during
/// conversion to `Bytes` data from `BytesMut` could be inlined.
100///
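/// A short illustration (sizes per the footnote below):
///
/// ```
/// use ntex_bytes::Bytes;
///
/// // Five bytes fit inline; cloning such a handle copies the bytes rather
/// // than bumping a reference count.
/// let small = Bytes::copy_from_slice(b"hello");
/// assert!(small.is_inline());
///
/// // A larger buffer is stored on the heap and shared by reference count.
/// let large = Bytes::copy_from_slice(&[0u8; 64]);
/// assert!(!large.is_inline());
/// ```
///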
101/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
102///
103pub struct Bytes {
104    inner: Inner,
105}
106
107/// A unique reference to a contiguous slice of memory.
108///
109/// `BytesMut` represents a unique view into a potentially shared memory region.
110/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>`, but with fewer copies and
/// allocations.
113///
114/// For more detail, see [Bytes](struct.Bytes.html).
115///
116/// # Growth
117///
118/// One key difference from `Vec<u8>` is that most operations **do not
119/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
120/// world");` could panic if `my_bytes` does not have enough capacity. Before
121/// writing to the buffer, ensure that there is enough remaining capacity by
122/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
123/// is preferable.
124///
125/// The only exception is `extend` which implicitly reserves required capacity.
126///
127/// # Examples
128///
129/// ```
130/// use ntex_bytes::{BytesMut, BufMut};
131///
132/// let mut buf = BytesMut::with_capacity(64);
133///
134/// buf.put_u8(b'h');
135/// buf.put_u8(b'e');
136/// buf.put("llo");
137///
138/// assert_eq!(buf, b"hello");
139///
140/// // Freeze the buffer so that it can be shared
141/// let a = buf.freeze();
142///
143/// // This does not allocate, instead `b` points to the same memory.
144/// let b = a.clone();
145///
146/// assert_eq!(a, b"hello");
147/// assert_eq!(b, b"hello");
148/// ```
149pub struct BytesMut {
150    inner: Inner,
151}
152
153/// A unique reference to a contiguous slice of memory.
154///
155/// `BytesVec` represents a unique view into a potentially shared memory region.
156/// Given the uniqueness guarantee, owners of `BytesVec` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>`, but with fewer copies and
/// allocations. Unlike `Bytes`, it always allocates (data is never inlined).
159///
160/// For more detail, see [Bytes](struct.Bytes.html).
161///
162/// # Growth
163///
164/// One key difference from `Vec<u8>` is that most operations **do not
165/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
166/// world");` could panic if `my_bytes` does not have enough capacity. Before
167/// writing to the buffer, ensure that there is enough remaining capacity by
168/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
169/// is preferable.
170///
171/// The only exception is `extend` which implicitly reserves required capacity.
172///
173/// # Examples
174///
175/// ```
176/// use ntex_bytes::{BytesVec, BufMut};
177///
178/// let mut buf = BytesVec::with_capacity(64);
179///
180/// buf.put_u8(b'h');
181/// buf.put_u8(b'e');
182/// buf.put("llo");
183///
184/// assert_eq!(&buf[..], b"hello");
185///
186/// // Freeze the buffer so that it can be shared
187/// let a = buf.freeze();
188///
189/// // This does not allocate, instead `b` points to the same memory.
190/// let b = a.clone();
191///
192/// assert_eq!(a, b"hello");
193/// assert_eq!(b, b"hello");
194/// ```
195pub struct BytesVec {
196    inner: InnerVec,
197}
198
199// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
200// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
201// that mutate the underlying buffer are only performed when the data range
202// being mutated is only available via a single `BytesMut` handle.
203//
204// # Data storage modes
205//
206// The goal of `bytes` is to be as efficient as possible across a wide range of
207// potential usage patterns. As such, `bytes` needs to be able to handle buffers
208// that are never shared, shared on a single thread, and shared across many
209// threads. `bytes` also needs to handle both tiny buffers as well as very large
210// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
// been known to be in the hundreds of megabytes, and HTTP header values can be
// just a few characters long.
213//
214// To achieve high performance in these various situations, `Bytes` and
215// `BytesMut` use different strategies for storing the buffer depending on the
216// usage pattern.
217//
218// ## Delayed `Arc` allocation
219//
220// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
221// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
222// not used and the buffer is backed by a `Vec<u8>` directly. Using an
223// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
224// shared, that allocation is avoided.
225//
226// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
227// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
228// into an `Arc` and both the original handle and the new handle use the same
229// buffer via the `Arc`.
230//
231// * `Arc` is being used to signify an atomically reference counted cell. We
232// don't use the `Arc` implementation provided by `std` and instead use our own.
233// This ends up simplifying a number of the `unsafe` code snippets.
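//
// For example (a sketch of the idea in terms of the public API):
//
//     let a = Bytes::from(vec![0u8; 1024]); // single handle, nothing shared yet
//     let b = a.clone();                    // now shared: both handles use one ref-counted buffer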
234//
235// ## Inlining small buffers
236//
237// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
238// systems, this ends up being 32 bytes, which is actually a lot of storage for
239// cases where `Bytes` is being used to represent small byte strings, such as
240// HTTP header names and values.
241//
242// To avoid any allocation at all in these cases, `Bytes` will use the struct
// itself for storing the buffer, reserving 1 byte for metadata. This means
// that, on 64 bit systems, 31 byte buffers require no allocation at all.
//
// The byte used for metadata stores a 2-bit flag used to indicate that the
// buffer is stored inline, as well as 6 bits for tracking the buffer length
// (the return value of `Bytes::len`).
249//
250// ## Static buffers
251//
252// `Bytes` can also represent a static buffer, which is created with
253// `Bytes::from_static`. No copying or allocations are required for tracking
254// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
255// tracking that the `Bytes` instance represents a static buffer is stored in
256// the `Bytes` struct.
257//
258// # Struct layout
259//
260// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
261// data fields as well as all of the function implementations.
262//
263// The `Inner` struct is carefully laid out in order to support the
264// functionality described above as well as being as small as possible. Size is
265// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
266// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
267// map structure.
268//
269// The `Inner` struct contains the following fields:
270//
271// * `ptr: *mut u8`
272// * `len: usize`
273// * `cap: usize`
274// * `arc: *mut Shared`
275//
276// ## `ptr: *mut u8`
277//
278// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
279// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
280// may have been shifted to point somewhere inside the buffer.
281//
282// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
283//
284// ## `len: usize`
285//
286// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
287// always the `Vec`'s length. The slice represented by `ptr` and `len` should
288// (ideally) always be initialized memory.
289//
290// When in "inlined" mode, `len` is used as part of the inlined buffer.
291//
292// ## `cap: usize`
293//
294// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
295// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
296// may or may not be initialized memory.
297//
298// When in "inlined" mode, `cap` is used as part of the inlined buffer.
299//
300// ## `arc: *mut Shared`
301//
302// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
303// will be the pointer to the `Arc` structure tracking the ref count for the
304// underlying buffer. When the pointer is null, then the `Arc` has not been
305// allocated yet and `self` is the only outstanding handle for the underlying
306// buffer.
307//
308// The lower two bits of `arc` are used to track the storage mode of `Inner`.
309// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
310// indicates vector storage, not yet promoted to Arc.  Since pointers to
311// allocated structures are aligned, the lower two bits of a pointer will always
312// be 0. This allows disambiguating between a pointer and the two flags.
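//
// A sketch of how the mode is read (the real accessor lives on `Inner` and
// loads `arc` atomically where required):
//
//     let kind = self.arc.as_ptr() as usize & KIND_MASK;
//     // kind is one of KIND_ARC, KIND_INLINE, KIND_STATIC or KIND_VEC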
313//
314// When in "inlined" mode, the least significant byte of `arc` is also used to
315// store the length of the buffer view (vs. the capacity, which is a constant).
316//
317// The rest of `arc`'s bytes are used as part of the inline buffer, which means
318// that those bytes need to be located next to the `ptr`, `len`, and `cap`
319// fields, which make up the rest of the inline buffer. This requires special
320// casing the layout of `Inner` depending on if the target platform is big or
321// little endian.
322//
323// On little endian platforms, the `arc` field must be the first field in the
324// struct. On big endian platforms, the `arc` field must be the last field in
325// the struct. Since a deterministic struct layout is required, `Inner` is
326// annotated with `#[repr(C)]`.
327//
328// # Thread safety
329//
330// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
331// by bumping the buffer ref count and returning a new struct pointing to the
332// same buffer. However, the `Arc` structure is lazily allocated. This means
333// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
334// function can be called concurrently from multiple threads. This is why an
335// `AtomicPtr` is used for the `arc` field vs. a `*const`.
336//
337// Care is taken to ensure that the need for synchronization is minimized. Most
338// operations do not require any synchronization.
339//
340#[cfg(target_endian = "little")]
341#[repr(C)]
342struct Inner {
343    // WARNING: Do not access the fields directly unless you know what you are
344    // doing. Instead, use the fns. See implementation comment above.
345    arc: NonNull<Shared>,
346    ptr: *mut u8,
347    len: usize,
348    cap: usize,
349}
350
351#[cfg(target_endian = "big")]
352#[repr(C)]
353struct Inner {
354    // WARNING: Do not access the fields directly unless you know what you are
355    // doing. Instead, use the fns. See implementation comment above.
356    ptr: *mut u8,
357    len: usize,
358    cap: usize,
359    arc: NonNull<Shared>,
360}
361
// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `std::sync::Arc` but without the weak counter. The ref
// counting fns are based on the ones found in `std`.
365//
366// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
367// up making the overall code simpler and easier to reason about. This is due to
368// some of the logic around setting `Inner::arc` and other ways the `arc` field
369// is used. Using `Arc` ended up requiring a number of funky transmutes and
370// other shenanigans to make it work.
371struct Shared {
372    vec: Vec<u8>,
373    ref_count: AtomicUsize,
374    pool: PoolRef,
375}
376
377struct SharedVec {
378    cap: usize,
379    len: u32,
380    offset: u32,
381    ref_count: AtomicUsize,
382    pool: PoolRef,
383}
384
385// Buffer storage strategy flags.
386const KIND_ARC: usize = 0b00;
387const KIND_INLINE: usize = 0b01;
388const KIND_STATIC: usize = 0b10;
389const KIND_VEC: usize = 0b11;
390const KIND_MASK: usize = 0b11;
391const KIND_UNMASK: usize = !KIND_MASK;
392
393const MIN_NON_ZERO_CAP: usize = 64;
394const SHARED_VEC_SIZE: usize = mem::size_of::<SharedVec>();
395
396// Bit op constants for extracting the inline length value from the `arc` field.
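//
// A sketch of how the inline length is packed into the low byte of `arc`
// (the real helpers live on `Inner`):
//     inline len:  (arc & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
//     encoded arc: (len << INLINE_LEN_OFFSET) | KIND_INLINE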
397const INLINE_LEN_MASK: usize = 0b1111_1100;
398const INLINE_LEN_OFFSET: usize = 2;
399
400// Byte offset from the start of `Inner` to where the inline buffer data
401// starts. On little endian platforms, the first byte of the struct is the
402// storage flag, so the data is shifted by a byte. On big endian systems, the
403// data starts at the beginning of the struct.
404#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 1;
406#[cfg(target_endian = "big")]
407const INLINE_DATA_OFFSET: isize = 0;
408
409// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
410// metadata.
411#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 1;
#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 1;
415
416/*
417 *
418 * ===== Bytes =====
419 *
420 */
421
422impl Bytes {
423    /// Creates a new empty `Bytes`.
424    ///
425    /// This will not allocate and the returned `Bytes` handle will be empty.
426    ///
427    /// # Examples
428    ///
429    /// ```
430    /// use ntex_bytes::Bytes;
431    ///
432    /// let b = Bytes::new();
433    /// assert_eq!(&b[..], b"");
434    /// ```
435    #[inline]
436    pub const fn new() -> Bytes {
437        Bytes {
438            inner: Inner::empty_inline(),
439        }
440    }
441
442    /// Creates a new `Bytes` from a static slice.
443    ///
444    /// The returned `Bytes` will point directly to the static slice. There is
445    /// no allocating or copying.
446    ///
447    /// # Examples
448    ///
449    /// ```
450    /// use ntex_bytes::Bytes;
451    ///
452    /// let b = Bytes::from_static(b"hello");
453    /// assert_eq!(&b[..], b"hello");
454    /// ```
455    #[inline]
456    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
457        Bytes {
458            inner: Inner::from_static(bytes),
459        }
460    }
461
462    /// Returns the number of bytes contained in this `Bytes`.
463    ///
464    /// # Examples
465    ///
466    /// ```
467    /// use ntex_bytes::Bytes;
468    ///
469    /// let b = Bytes::from(&b"hello"[..]);
470    /// assert_eq!(b.len(), 5);
471    /// ```
472    #[inline]
473    pub fn len(&self) -> usize {
474        self.inner.len()
475    }
476
477    /// Returns true if the `Bytes` has a length of 0.
478    ///
479    /// # Examples
480    ///
481    /// ```
482    /// use ntex_bytes::Bytes;
483    ///
484    /// let b = Bytes::new();
485    /// assert!(b.is_empty());
486    /// ```
487    #[inline]
488    pub fn is_empty(&self) -> bool {
489        self.inner.is_empty()
490    }
491
    /// Returns `true` if the `Bytes` uses inline allocation.
493    ///
494    /// # Examples
495    /// ```
496    /// use ntex_bytes::{Bytes, BytesMut};
497    ///
498    /// assert!(Bytes::from(BytesMut::from(&[0, 0, 0, 0][..])).is_inline());
499    /// assert!(Bytes::from(Vec::with_capacity(4)).is_inline());
500    /// assert!(!Bytes::from(&[0; 1024][..]).is_inline());
501    /// ```
502    pub fn is_inline(&self) -> bool {
503        self.inner.is_inline()
504    }
505
    /// Creates a `Bytes` instance from a slice, by copying it.
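    ///
    /// The bytes are copied out of `data` (and stored inline when small enough):
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(b, b"hello");
    /// ```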
507    pub fn copy_from_slice(data: &[u8]) -> Self {
508        Self::copy_from_slice_in(data, PoolId::DEFAULT)
509    }
510
    /// Creates a `Bytes` instance from a slice, by copying it. Uses the
    /// specified memory pool when an allocation is required.
512    pub fn copy_from_slice_in<T>(data: &[u8], pool: T) -> Self
513    where
514        PoolRef: From<T>,
515    {
516        if data.len() <= INLINE_CAP {
517            Bytes {
518                inner: Inner::from_slice_inline(data),
519            }
520        } else {
521            Bytes {
522                inner: Inner::from_slice(data.len(), data, pool.into()),
523            }
524        }
525    }
526
527    /// Returns a slice of self for the provided range.
528    ///
529    /// This will increment the reference count for the underlying memory and
530    /// return a new `Bytes` handle set to the slice.
531    ///
532    /// This operation is `O(1)`.
533    ///
534    /// # Examples
535    ///
536    /// ```
537    /// use ntex_bytes::Bytes;
538    ///
539    /// let a = Bytes::from(b"hello world");
540    /// let b = a.slice(2..5);
541    ///
542    /// assert_eq!(&b[..], b"llo");
543    /// assert_eq!(&b[..=1], b"ll");
544    /// assert_eq!(&b[1..=1], b"l");
545    /// ```
546    ///
547    /// # Panics
548    ///
549    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
550    /// will panic.
551    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
552        self.slice_checked(range)
553            .expect("Requires that `begin <= end` and `end <= self.len()`")
554    }
555
556    /// Returns a slice of self for the provided range.
557    ///
    /// Returns `None` unless `begin <= end` and `end <= self.len()`.
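    ///
    /// This is the non-panicking variant of [`slice`](#method.slice):
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    ///
    /// assert_eq!(a.slice_checked(2..5).unwrap(), b"llo");
    /// assert!(a.slice_checked(5..2).is_none());
    /// assert!(a.slice_checked(5..100).is_none());
    /// ```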
559    pub fn slice_checked(&self, range: impl RangeBounds<usize>) -> Option<Bytes> {
560        use std::ops::Bound;
561
562        let len = self.len();
563
564        let begin = match range.start_bound() {
565            Bound::Included(&n) => n,
566            Bound::Excluded(&n) => n + 1,
567            Bound::Unbounded => 0,
568        };
569
570        let end = match range.end_bound() {
571            Bound::Included(&n) => n + 1,
572            Bound::Excluded(&n) => n,
573            Bound::Unbounded => len,
574        };
575
576        if begin <= end && end <= len {
577            if end - begin <= INLINE_CAP {
578                Some(Bytes {
579                    inner: Inner::from_slice_inline(&self[begin..end]),
580                })
581            } else {
582                let mut ret = self.clone();
583                unsafe {
584                    ret.inner.set_end(end);
585                    ret.inner.set_start(begin);
586                }
587                Some(ret)
588            }
589        } else {
590            None
591        }
592    }
593
594    /// Returns a slice of self that is equivalent to the given `subset`.
595    ///
596    /// When processing a `Bytes` buffer with other tools, one often gets a
597    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
598    /// This function turns that `&[u8]` into another `Bytes`, as if one had
599    /// called `self.slice()` with the offsets that correspond to `subset`.
600    ///
601    /// This operation is `O(1)`.
602    ///
603    /// # Examples
604    ///
605    /// ```
606    /// use ntex_bytes::Bytes;
607    ///
608    /// let bytes = Bytes::from(&b"012345678"[..]);
609    /// let as_slice = bytes.as_ref();
610    /// let subset = &as_slice[2..6];
611    /// let subslice = bytes.slice_ref(&subset);
612    /// assert_eq!(subslice, b"2345");
613    /// ```
614    ///
615    /// # Panics
616    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
619    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
620        self.slice_ref_checked(subset)
            .expect("Given `subset` slice is not contained within the `Bytes` buffer")
622    }
623
624    /// Returns a slice of self that is equivalent to the given `subset`.
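    ///
    /// Returns `None` if `subset` is not contained within the `Bytes` buffer;
    /// this is the non-panicking variant of [`slice_ref`](#method.slice_ref):
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let subset = &bytes.as_ref()[2..6];
    ///
    /// assert_eq!(bytes.slice_ref_checked(subset).unwrap(), b"2345");
    ///
    /// // A slice that does not come from `bytes` yields `None`.
    /// let other = vec![1u8, 2, 3];
    /// assert!(bytes.slice_ref_checked(&other).is_none());
    /// ```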
625    pub fn slice_ref_checked(&self, subset: &[u8]) -> Option<Bytes> {
626        let bytes_p = self.as_ptr() as usize;
627        let bytes_len = self.len();
628
629        let sub_p = subset.as_ptr() as usize;
630        let sub_len = subset.len();
631
632        if sub_p >= bytes_p && sub_p + sub_len <= bytes_p + bytes_len {
633            let sub_offset = sub_p - bytes_p;
634            Some(self.slice(sub_offset..(sub_offset + sub_len)))
635        } else {
636            None
637        }
638    }
639
640    /// Splits the bytes into two at the given index.
641    ///
642    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
643    /// contains elements `[at, len)`.
644    ///
645    /// This is an `O(1)` operation that just increases the reference count and
646    /// sets a few indices.
647    ///
648    /// # Examples
649    ///
650    /// ```
651    /// use ntex_bytes::Bytes;
652    ///
653    /// let mut a = Bytes::from(&b"hello world"[..]);
654    /// let b = a.split_off(5);
655    ///
656    /// assert_eq!(a, b"hello");
657    /// assert_eq!(b, b" world");
658    /// ```
659    ///
660    /// # Panics
661    ///
662    /// Panics if `at > self.len()`.
663    pub fn split_off(&mut self, at: usize) -> Bytes {
664        self.split_off_checked(at)
            .expect("`at` value must be <= `self.len()`")
666    }
667
668    /// Splits the bytes into two at the given index.
669    ///
    /// Returns `None` if `at > self.len()`.
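    ///
    /// This is the non-panicking variant of [`split_off`](#method.split_off):
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off_checked(5).unwrap();
    ///
    /// assert_eq!(a, b"hello");
    /// assert_eq!(b, b" world");
    ///
    /// assert!(a.split_off_checked(100).is_none());
    /// ```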
671    pub fn split_off_checked(&mut self, at: usize) -> Option<Bytes> {
672        if at <= self.len() {
673            if at == self.len() {
674                Some(Bytes::new())
675            } else if at == 0 {
676                Some(mem::take(self))
677            } else {
678                Some(Bytes {
679                    inner: self.inner.split_off(at, true),
680                })
681            }
682        } else {
683            None
684        }
685    }
686
687    /// Splits the bytes into two at the given index.
688    ///
689    /// Afterwards `self` contains elements `[at, len)`, and the returned
690    /// `Bytes` contains elements `[0, at)`.
691    ///
692    /// This is an `O(1)` operation that just increases the reference count and
693    /// sets a few indices.
694    ///
695    /// # Examples
696    ///
697    /// ```
698    /// use ntex_bytes::Bytes;
699    ///
700    /// let mut a = Bytes::from(&b"hello world"[..]);
701    /// let b = a.split_to(5);
702    ///
703    /// assert_eq!(a, b" world");
704    /// assert_eq!(b, b"hello");
705    /// ```
706    ///
707    /// # Panics
708    ///
709    /// Panics if `at > len`.
710    pub fn split_to(&mut self, at: usize) -> Bytes {
711        self.split_to_checked(at)
            .expect("`at` value must be <= `self.len()`")
713    }
714
715    /// Splits the bytes into two at the given index.
716    ///
    /// Returns `None` if `at > self.len()`.
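    ///
    /// This is the non-panicking variant of [`split_to`](#method.split_to):
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to_checked(5).unwrap();
    ///
    /// assert_eq!(a, b" world");
    /// assert_eq!(b, b"hello");
    ///
    /// assert!(a.split_to_checked(100).is_none());
    /// ```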
718    pub fn split_to_checked(&mut self, at: usize) -> Option<Bytes> {
719        if at <= self.len() {
720            if at == self.len() {
721                Some(mem::take(self))
722            } else if at == 0 {
723                Some(Bytes::new())
724            } else {
725                Some(Bytes {
726                    inner: self.inner.split_to(at, true),
727                })
728            }
729        } else {
730            None
731        }
732    }
733
734    /// Shortens the buffer, keeping the first `len` bytes and dropping the
735    /// rest.
736    ///
737    /// If `len` is greater than the buffer's current length, this has no
738    /// effect.
739    ///
740    /// The [`split_off`] method can emulate `truncate`, but this causes the
741    /// excess bytes to be returned instead of dropped.
742    ///
743    /// # Examples
744    ///
745    /// ```
746    /// use ntex_bytes::Bytes;
747    ///
748    /// let mut buf = Bytes::from(&b"hello world"[..]);
749    /// buf.truncate(5);
750    /// assert_eq!(buf, b"hello"[..]);
751    /// ```
752    ///
753    /// [`split_off`]: #method.split_off
754    #[inline]
755    pub fn truncate(&mut self, len: usize) {
756        self.inner.truncate(len, true);
757    }
758
    /// Releases excess capacity held by the underlying buffer.
    ///
    /// This is useful if the underlying buffer is larger than the current
    /// `Bytes` view: when enough space can be reclaimed, the data is copied
    /// into a smaller (possibly inline) buffer.
762    ///
763    /// # Examples
764    ///
765    /// ```
766    /// use ntex_bytes::Bytes;
767    ///
768    /// let mut buf = Bytes::from(&b"hello world"[..]);
769    /// buf.trimdown();
770    /// assert_eq!(buf, b"hello world"[..]);
771    /// ```
772    #[inline]
773    pub fn trimdown(&mut self) {
774        let kind = self.inner.kind();
775
776        // trim down only if buffer is not inline or static and
        // buffer's unused space is at least 64 bytes
778        if !(kind == KIND_INLINE || kind == KIND_STATIC) {
779            if self.inner.len() <= INLINE_CAP {
780                *self = Bytes {
781                    inner: Inner::from_slice_inline(self),
782                };
783            } else if self.inner.capacity() - self.inner.len() >= 64 {
784                *self = Bytes {
785                    inner: Inner::from_slice(self.len(), self, self.inner.pool()),
786                }
787            }
788        }
789    }
790
791    /// Clears the buffer, removing all data.
792    ///
793    /// # Examples
794    ///
795    /// ```
796    /// use ntex_bytes::Bytes;
797    ///
798    /// let mut buf = Bytes::from(&b"hello world"[..]);
799    /// buf.clear();
800    /// assert!(buf.is_empty());
801    /// ```
802    #[inline]
803    pub fn clear(&mut self) {
804        self.inner = Inner::empty_inline();
805    }
806
807    /// Attempts to convert into a `BytesMut` handle.
808    ///
809    /// This will only succeed if there are no other outstanding references to
810    /// the underlying chunk of memory. `Bytes` handles that contain inlined
811    /// bytes will always be convertible to `BytesMut`.
812    ///
813    /// # Examples
814    ///
815    /// ```
816    /// use ntex_bytes::Bytes;
817    ///
818    /// let a = Bytes::copy_from_slice(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
819    ///
820    /// // Create a shallow clone
821    /// let b = a.clone();
822    ///
823    /// // This will fail because `b` shares a reference with `a`
824    /// let a = a.try_mut().unwrap_err();
825    ///
826    /// drop(b);
827    ///
828    /// // This will succeed
829    /// let mut a = a.try_mut().unwrap();
830    ///
831    /// a[0] = b'b';
832    ///
833    /// assert_eq!(&a[..4], b"bary");
834    /// ```
835    pub fn try_mut(self) -> Result<BytesMut, Bytes> {
836        if self.inner.is_mut_safe() {
837            Ok(BytesMut { inner: self.inner })
838        } else {
839            Err(self)
840        }
841    }
842
843    /// Returns an iterator over the bytes contained by the buffer.
844    ///
845    /// # Examples
846    ///
847    /// ```
848    /// use ntex_bytes::{Buf, Bytes};
849    ///
850    /// let buf = Bytes::from(&b"abc"[..]);
851    /// let mut iter = buf.iter();
852    ///
853    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
854    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
855    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
856    /// assert_eq!(iter.next(), None);
857    /// ```
858    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
859        self.chunk().iter()
860    }
861}
862
863impl Buf for Bytes {
864    #[inline]
865    fn remaining(&self) -> usize {
866        self.len()
867    }
868
869    #[inline]
870    fn chunk(&self) -> &[u8] {
871        self.inner.as_ref()
872    }
873
874    #[inline]
875    fn advance(&mut self, cnt: usize) {
876        assert!(
877            cnt <= self.inner.as_ref().len(),
878            "cannot advance past `remaining`"
879        );
880        unsafe {
881            self.inner.set_start(cnt);
882        }
883    }
884}
885
886impl bytes::buf::Buf for Bytes {
887    #[inline]
888    fn remaining(&self) -> usize {
889        self.len()
890    }
891
892    #[inline]
893    fn chunk(&self) -> &[u8] {
894        self.inner.as_ref()
895    }
896
897    #[inline]
898    fn advance(&mut self, cnt: usize) {
899        assert!(
900            cnt <= self.inner.as_ref().len(),
901            "cannot advance past `remaining`"
902        );
903        unsafe {
904            self.inner.set_start(cnt);
905        }
906    }
907}
908
909impl Clone for Bytes {
910    fn clone(&self) -> Bytes {
911        Bytes {
912            inner: unsafe { self.inner.shallow_clone() },
913        }
914    }
915}
916
917impl AsRef<[u8]> for Bytes {
918    #[inline]
919    fn as_ref(&self) -> &[u8] {
920        self.inner.as_ref()
921    }
922}
923
924impl Deref for Bytes {
925    type Target = [u8];
926
927    #[inline]
928    fn deref(&self) -> &[u8] {
929        self.inner.as_ref()
930    }
931}
932
933impl From<&Bytes> for Bytes {
934    fn from(src: &Bytes) -> Bytes {
935        src.clone()
936    }
937}
938
939impl From<BytesMut> for Bytes {
940    fn from(src: BytesMut) -> Bytes {
941        src.freeze()
942    }
943}
944
945impl From<Vec<u8>> for Bytes {
946    /// Convert a `Vec` into a `Bytes`
947    ///
    /// An empty vector becomes an empty `Bytes`, a small vector is copied
    /// inline, and a larger vector is converted through `BytesMut` and stored
    /// on the heap.
951    fn from(src: Vec<u8>) -> Bytes {
952        if src.is_empty() {
953            Bytes::new()
954        } else if src.len() <= INLINE_CAP {
955            Bytes {
956                inner: Inner::from_slice_inline(&src),
957            }
958        } else {
959            BytesMut::from(src).freeze()
960        }
961    }
962}
963
964impl From<String> for Bytes {
965    fn from(src: String) -> Bytes {
966        if src.is_empty() {
967            Bytes::new()
968        } else if src.len() <= INLINE_CAP {
969            Bytes {
970                inner: Inner::from_slice_inline(src.as_bytes()),
971            }
972        } else {
973            BytesMut::from(src).freeze()
974        }
975    }
976}
977
978impl From<&'static [u8]> for Bytes {
979    fn from(src: &'static [u8]) -> Bytes {
980        Bytes::from_static(src)
981    }
982}
983
984impl From<&'static str> for Bytes {
985    fn from(src: &'static str) -> Bytes {
986        Bytes::from_static(src.as_bytes())
987    }
988}
989
990impl<'a, const N: usize> From<&'a [u8; N]> for Bytes {
991    fn from(src: &'a [u8; N]) -> Bytes {
992        Bytes::copy_from_slice(src)
993    }
994}
995
996impl FromIterator<u8> for Bytes {
997    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
998        BytesMut::from_iter(into_iter).freeze()
999    }
1000}
1001
1002impl<'a> FromIterator<&'a u8> for Bytes {
1003    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1004        BytesMut::from_iter(into_iter).freeze()
1005    }
1006}
1007
1008impl Eq for Bytes {}
1009
1010impl PartialEq for Bytes {
1011    fn eq(&self, other: &Bytes) -> bool {
1012        self.inner.as_ref() == other.inner.as_ref()
1013    }
1014}
1015
1016impl PartialOrd for Bytes {
1017    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
1018        Some(self.cmp(other))
1019    }
1020}
1021
1022impl Ord for Bytes {
1023    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
1024        self.inner.as_ref().cmp(other.inner.as_ref())
1025    }
1026}
1027
1028impl Default for Bytes {
1029    #[inline]
1030    fn default() -> Bytes {
1031        Bytes::new()
1032    }
1033}
1034
1035impl fmt::Debug for Bytes {
1036    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1037        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1038    }
1039}
1040
1041impl hash::Hash for Bytes {
1042    fn hash<H>(&self, state: &mut H)
1043    where
1044        H: hash::Hasher,
1045    {
1046        let s: &[u8] = self.as_ref();
1047        s.hash(state);
1048    }
1049}
1050
1051impl Borrow<[u8]> for Bytes {
1052    fn borrow(&self) -> &[u8] {
1053        self.as_ref()
1054    }
1055}
1056
1057impl IntoIterator for Bytes {
1058    type Item = u8;
1059    type IntoIter = IntoIter<Bytes>;
1060
1061    fn into_iter(self) -> Self::IntoIter {
1062        IntoIter::new(self)
1063    }
1064}
1065
1066impl<'a> IntoIterator for &'a Bytes {
1067    type Item = &'a u8;
1068    type IntoIter = std::slice::Iter<'a, u8>;
1069
1070    fn into_iter(self) -> Self::IntoIter {
1071        self.as_ref().iter()
1072    }
1073}
1074
1075/*
1076 *
1077 * ===== BytesMut =====
1078 *
1079 */
1080
1081impl BytesMut {
1082    /// Creates a new `BytesMut` with the specified capacity.
1083    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating. Note that, unlike `Bytes`, a `BytesMut` does not
    /// inline small buffers.
1087    ///
1088    /// It is important to note that this function does not specify the length
1089    /// of the returned `BytesMut`, but only the capacity.
1090    ///
1091    /// # Panics
1092    ///
    /// Panics if `capacity` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1095    ///
1096    /// # Examples
1097    ///
1098    /// ```
1099    /// use ntex_bytes::{BytesMut, BufMut};
1100    ///
1101    /// let mut bytes = BytesMut::with_capacity(64);
1102    ///
1103    /// // `bytes` contains no data, even though there is capacity
1104    /// assert_eq!(bytes.len(), 0);
1105    ///
1106    /// bytes.put(&b"hello world"[..]);
1107    ///
1108    /// assert_eq!(&bytes[..], b"hello world");
1109    /// ```
1110    #[inline]
1111    pub fn with_capacity(capacity: usize) -> BytesMut {
1112        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1113    }
1114
1115    /// Creates a new `BytesMut` with the specified capacity and in specified memory pool.
1116    ///
1117    /// # Examples
1118    ///
1119    /// ```
1120    /// use ntex_bytes::{BytesMut, BufMut, PoolId};
1121    ///
1122    /// let mut bytes = BytesMut::with_capacity_in(64, PoolId::P1);
1123    ///
1124    /// // `bytes` contains no data, even though there is capacity
1125    /// assert_eq!(bytes.len(), 0);
1126    ///
1127    /// bytes.put(&b"hello world"[..]);
1128    ///
1129    /// assert_eq!(&bytes[..], b"hello world");
1130    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1131    /// ```
1132    #[inline]
1133    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesMut
1134    where
1135        PoolRef: From<T>,
1136    {
1137        BytesMut {
1138            inner: Inner::with_capacity(capacity, pool.into()),
1139        }
1140    }
1141
1142    /// Creates a new `BytesMut` from slice, by copying it.
1143    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
1144        Self::copy_from_slice_in(src, PoolId::DEFAULT)
1145    }
1146
1147    /// Creates a new `BytesMut` from slice, by copying it.
1148    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
1149    where
1150        T: AsRef<[u8]>,
1151        PoolRef: From<U>,
1152    {
1153        let s = src.as_ref();
1154        BytesMut {
1155            inner: Inner::from_slice(s.len(), s, pool.into()),
1156        }
1157    }
1158
1159    #[inline]
1160    /// Convert a `Vec` into a `BytesMut`
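    ///
    /// Takes ownership of `src`; the resulting buffer contains the same bytes.
    ///
    /// ```
    /// use ntex_bytes::{BytesMut, PoolId};
    ///
    /// let buf = BytesMut::from_vec(vec![1u8, 2, 3], PoolId::DEFAULT);
    /// assert_eq!(&buf[..], &[1, 2, 3]);
    /// ```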
1161    pub fn from_vec<T>(src: Vec<u8>, pool: T) -> BytesMut
1162    where
1163        PoolRef: From<T>,
1164    {
1165        BytesMut {
1166            inner: Inner::from_vec(src, pool.into()),
1167        }
1168    }
1169
1170    /// Creates a new `BytesMut` with default capacity.
1171    ///
    /// The resulting object has length 0 and a small, unspecified capacity.
1174    ///
1175    /// # Examples
1176    ///
1177    /// ```
1178    /// use ntex_bytes::{BytesMut, BufMut};
1179    ///
1180    /// let mut bytes = BytesMut::new();
1181    ///
1182    /// assert_eq!(0, bytes.len());
1183    ///
1184    /// bytes.reserve(2);
1185    /// bytes.put_slice(b"xy");
1186    ///
1187    /// assert_eq!(&b"xy"[..], &bytes[..]);
1188    /// ```
1189    #[inline]
1190    pub fn new() -> BytesMut {
1191        BytesMut::with_capacity(MIN_NON_ZERO_CAP)
1192    }
1193
1194    /// Returns the number of bytes contained in this `BytesMut`.
1195    ///
1196    /// # Examples
1197    ///
1198    /// ```
1199    /// use ntex_bytes::BytesMut;
1200    ///
1201    /// let b = BytesMut::from(&b"hello"[..]);
1202    /// assert_eq!(b.len(), 5);
1203    /// ```
1204    #[inline]
1205    pub fn len(&self) -> usize {
1206        self.inner.len()
1207    }
1208
1209    /// Returns true if the `BytesMut` has a length of 0.
1210    ///
1211    /// # Examples
1212    ///
1213    /// ```
1214    /// use ntex_bytes::BytesMut;
1215    ///
1216    /// let b = BytesMut::with_capacity(64);
1217    /// assert!(b.is_empty());
1218    /// ```
1219    #[inline]
1220    pub fn is_empty(&self) -> bool {
1221        self.inner.is_empty()
1222    }
1223
1224    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
1225    ///
1226    /// # Examples
1227    ///
1228    /// ```
1229    /// use ntex_bytes::BytesMut;
1230    ///
1231    /// let b = BytesMut::with_capacity(64);
1232    /// assert_eq!(b.capacity(), 64);
1233    /// ```
1234    #[inline]
1235    pub fn capacity(&self) -> usize {
1236        self.inner.capacity()
1237    }
1238
1239    /// Converts `self` into an immutable `Bytes`.
1240    ///
1241    /// The conversion is zero cost and is used to indicate that the slice
1242    /// referenced by the handle will no longer be mutated. Once the conversion
1243    /// is done, the handle can be cloned and shared across threads.
1244    ///
1245    /// # Examples
1246    ///
1247    /// ```
1248    /// use ntex_bytes::{BytesMut, BufMut};
1249    /// use std::thread;
1250    ///
1251    /// let mut b = BytesMut::with_capacity(64);
1252    /// b.put("hello world");
1253    /// let b1 = b.freeze();
1254    /// let b2 = b1.clone();
1255    ///
1256    /// let th = thread::spawn(move || {
1257    ///     assert_eq!(b1, b"hello world");
1258    /// });
1259    ///
1260    /// assert_eq!(b2, b"hello world");
1261    /// th.join().unwrap();
1262    /// ```
1263    #[inline]
1264    pub fn freeze(self) -> Bytes {
1265        if self.inner.len() <= INLINE_CAP {
1266            Bytes {
1267                inner: Inner::from_slice_inline(self.inner.as_ref()),
1268            }
1269        } else {
1270            Bytes { inner: self.inner }
1271        }
1272    }
1273
1274    /// Splits the bytes into two at the given index.
1275    ///
1276    /// Afterwards `self` contains elements `[0, at)`, and the returned
1277    /// `BytesMut` contains elements `[at, capacity)`.
1278    ///
1279    /// This is an `O(1)` operation that just increases the reference count
1280    /// and sets a few indices.
1281    ///
1282    /// # Examples
1283    ///
1284    /// ```
1285    /// use ntex_bytes::BytesMut;
1286    ///
1287    /// let mut a = BytesMut::from(&b"hello world"[..]);
1288    /// let mut b = a.split_off(5);
1289    ///
1290    /// a[0] = b'j';
1291    /// b[0] = b'!';
1292    ///
1293    /// assert_eq!(&a[..], b"jello");
1294    /// assert_eq!(&b[..], b"!world");
1295    /// ```
1296    ///
1297    /// # Panics
1298    ///
1299    /// Panics if `at > capacity`.
1300    pub fn split_off(&mut self, at: usize) -> BytesMut {
1301        BytesMut {
1302            inner: self.inner.split_off(at, false),
1303        }
1304    }
1305
1306    /// Removes the bytes from the current view, returning them in a new
1307    /// `BytesMut` handle.
1308    ///
1309    /// Afterwards, `self` will be empty, but will retain any additional
1310    /// capacity that it had before the operation. This is identical to
1311    /// `self.split_to(self.len())`.
1312    ///
1313    /// This is an `O(1)` operation that just increases the reference count and
1314    /// sets a few indices.
1315    ///
1316    /// # Examples
1317    ///
1318    /// ```
1319    /// use ntex_bytes::{BytesMut, BufMut};
1320    ///
1321    /// let mut buf = BytesMut::with_capacity(1024);
1322    /// buf.put(&b"hello world"[..]);
1323    ///
1324    /// let other = buf.split();
1325    ///
1326    /// assert!(buf.is_empty());
1327    /// assert_eq!(1013, buf.capacity());
1328    ///
1329    /// assert_eq!(other, b"hello world"[..]);
1330    /// ```
1331    pub fn split(&mut self) -> BytesMut {
1332        self.split_to(self.len())
1333    }
1334
1335    /// Splits the buffer into two at the given index.
1336    ///
1337    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
1338    /// contains elements `[0, at)`.
1339    ///
1340    /// This is an `O(1)` operation that just increases the reference count and
1341    /// sets a few indices.
1342    ///
1343    /// # Examples
1344    ///
1345    /// ```
1346    /// use ntex_bytes::BytesMut;
1347    ///
1348    /// let mut a = BytesMut::from(&b"hello world"[..]);
1349    /// let mut b = a.split_to(5);
1350    ///
1351    /// a[0] = b'!';
1352    /// b[0] = b'j';
1353    ///
1354    /// assert_eq!(&a[..], b"!world");
1355    /// assert_eq!(&b[..], b"jello");
1356    /// ```
1357    ///
1358    /// # Panics
1359    ///
1360    /// Panics if `at > len`.
1361    pub fn split_to(&mut self, at: usize) -> BytesMut {
1362        self.split_to_checked(at)
            .expect("`at` value must be <= `self.len()`")
1364    }
1365
1366    /// Splits the bytes into two at the given index.
1367    ///
    /// Returns `None` if `at > self.len()`.
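    ///
    /// This is the non-panicking variant of [`split_to`](#method.split_to):
    ///
    /// ```
    /// use ntex_bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let b = a.split_to_checked(5).unwrap();
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    ///
    /// assert!(a.split_to_checked(100).is_none());
    /// ```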
1369    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
1370        if at <= self.len() {
1371            Some(BytesMut {
1372                inner: self.inner.split_to(at, false),
1373            })
1374        } else {
1375            None
1376        }
1377    }
1378
1379    /// Shortens the buffer, keeping the first `len` bytes and dropping the
1380    /// rest.
1381    ///
1382    /// If `len` is greater than the buffer's current length, this has no
1383    /// effect.
1384    ///
1385    /// The [`split_off`] method can emulate `truncate`, but this causes the
1386    /// excess bytes to be returned instead of dropped.
1387    ///
1388    /// # Examples
1389    ///
1390    /// ```
1391    /// use ntex_bytes::BytesMut;
1392    ///
1393    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1394    /// buf.truncate(5);
1395    /// assert_eq!(buf, b"hello"[..]);
1396    /// ```
1397    ///
1398    /// [`split_off`]: #method.split_off
1399    pub fn truncate(&mut self, len: usize) {
1400        self.inner.truncate(len, false);
1401    }
1402
1403    /// Clears the buffer, removing all data.
1404    ///
1405    /// # Examples
1406    ///
1407    /// ```
1408    /// use ntex_bytes::BytesMut;
1409    ///
1410    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1411    /// buf.clear();
1412    /// assert!(buf.is_empty());
1413    /// ```
1414    pub fn clear(&mut self) {
1415        self.truncate(0);
1416    }
1417
1418    /// Resizes the buffer so that `len` is equal to `new_len`.
1419    ///
1420    /// If `new_len` is greater than `len`, the buffer is extended by the
1421    /// difference with each additional byte set to `value`. If `new_len` is
1422    /// less than `len`, the buffer is simply truncated.
1423    ///
1424    /// # Panics
1425    ///
    /// Panics if `new_len` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1428    ///
1429    /// # Examples
1430    ///
1431    /// ```
1432    /// use ntex_bytes::BytesMut;
1433    ///
1434    /// let mut buf = BytesMut::new();
1435    ///
1436    /// buf.resize(3, 0x1);
1437    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
1438    ///
1439    /// buf.resize(2, 0x2);
1440    /// assert_eq!(&buf[..], &[0x1, 0x1]);
1441    ///
1442    /// buf.resize(4, 0x3);
1443    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
1444    /// ```
1445    #[inline]
1446    pub fn resize(&mut self, new_len: usize, value: u8) {
1447        self.inner.resize(new_len, value);
1448    }
1449
1450    /// Sets the length of the buffer.
1451    ///
1452    /// This will explicitly set the size of the buffer without actually
1453    /// modifying the data, so it is up to the caller to ensure that the data
1454    /// has been initialized.
1455    ///
1456    /// # Examples
1457    ///
1458    /// ```
1459    /// use ntex_bytes::BytesMut;
1460    ///
1461    /// let mut b = BytesMut::from(&b"hello world"[..]);
1462    ///
1463    /// unsafe {
1464    ///     b.set_len(5);
1465    /// }
1466    ///
1467    /// assert_eq!(&b[..], b"hello");
1468    ///
1469    /// unsafe {
1470    ///     b.set_len(11);
1471    /// }
1472    ///
1473    /// assert_eq!(&b[..], b"hello world");
1474    /// ```
1475    ///
1476    /// # Panics
1477    ///
    /// This method will panic if `len` is out of bounds for the underlying
    /// buffer.
1480    #[inline]
1481    #[allow(clippy::missing_safety_doc)]
1482    pub unsafe fn set_len(&mut self, len: usize) {
1483        self.inner.set_len(len)
1484    }
1485
1486    /// Reserves capacity for at least `additional` more bytes to be inserted
1487    /// into the given `BytesMut`.
1488    ///
1489    /// More than `additional` bytes may be reserved in order to avoid frequent
1490    /// reallocations. A call to `reserve` may result in an allocation.
1491    ///
1492    /// Before allocating new buffer space, the function will attempt to reclaim
1493    /// space in the existing buffer. If the current handle references a small
1494    /// view in the original buffer and all other handles have been dropped,
1495    /// and the requested capacity is less than or equal to the existing
1496    /// buffer's capacity, then the current view will be copied to the front of
1497    /// the buffer and the handle will take ownership of the full buffer.
1498    ///
1499    /// # Panics
1500    ///
    /// Panics if the new capacity does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems), or if it overflows `usize`.
1503    ///
1504    /// # Examples
1505    ///
1506    /// In the following example, a new buffer is allocated.
1507    ///
1508    /// ```
1509    /// use ntex_bytes::BytesMut;
1510    ///
1511    /// let mut buf = BytesMut::from(&b"hello"[..]);
1512    /// buf.reserve(64);
1513    /// assert!(buf.capacity() >= 69);
1514    /// ```
1515    ///
1516    /// In the following example, the existing buffer is reclaimed.
1517    ///
1518    /// ```
1519    /// use ntex_bytes::{BytesMut, BufMut};
1520    ///
1521    /// let mut buf = BytesMut::with_capacity(128);
1522    /// buf.put(&[0; 64][..]);
1523    ///
1524    /// let ptr = buf.as_ptr();
1525    /// let other = buf.split();
1526    ///
1527    /// assert!(buf.is_empty());
1528    /// assert_eq!(buf.capacity(), 64);
1529    ///
1530    /// drop(other);
1531    /// buf.reserve(128);
1532    ///
1533    /// assert_eq!(buf.capacity(), 128);
1534    /// assert_eq!(buf.as_ptr(), ptr);
1535    /// ```
1540    #[inline]
1541    pub fn reserve(&mut self, additional: usize) {
1542        let len = self.len();
1543        let rem = self.capacity() - len;
1544
1545        if additional <= rem {
1546            // The handle can already store at least `additional` more bytes, so
1547            // there is no further work needed to be done.
1548            return;
1549        }
1550
1551        self.inner.reserve_inner(additional);
1552    }
1553
1554    /// Appends given bytes to this object.
1555    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first, so `extend_from_slice` does not panic.
1558    ///
1559    /// # Examples
1560    ///
1561    /// ```
1562    /// use ntex_bytes::BytesMut;
1563    ///
1564    /// let mut buf = BytesMut::with_capacity(0);
1565    /// buf.extend_from_slice(b"aaabbb");
1566    /// buf.extend_from_slice(b"cccddd");
1567    ///
1568    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
1569    /// ```
1570    #[inline]
1571    pub fn extend_from_slice(&mut self, extend: &[u8]) {
1572        self.put_slice(extend);
1573    }
1574
1575    /// Returns an iterator over the bytes contained by the buffer.
1576    ///
1577    /// # Examples
1578    ///
1579    /// ```
1580    /// use ntex_bytes::{Buf, BytesMut};
1581    ///
1582    /// let buf = BytesMut::from(&b"abc"[..]);
1583    /// let mut iter = buf.iter();
1584    ///
1585    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
1586    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
1587    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
1588    /// assert_eq!(iter.next(), None);
1589    /// ```
1590    #[inline]
1591    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
1592        self.chunk().iter()
1593    }
1594
1595    #[cfg(feature = "mpool")]
1596    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
1597        self.inner.move_to_pool(pool);
1598    }
1599}
1600
1601impl Buf for BytesMut {
1602    #[inline]
1603    fn remaining(&self) -> usize {
1604        self.len()
1605    }
1606
1607    #[inline]
1608    fn chunk(&self) -> &[u8] {
1609        self.inner.as_ref()
1610    }
1611
1612    #[inline]
1613    fn advance(&mut self, cnt: usize) {
1614        assert!(
1615            cnt <= self.inner.as_ref().len(),
1616            "cannot advance past `remaining`"
1617        );
1618        unsafe {
1619            self.inner.set_start(cnt);
1620        }
1621    }
1622}
1623
1624impl BufMut for BytesMut {
1625    #[inline]
1626    fn remaining_mut(&self) -> usize {
1627        self.capacity() - self.len()
1628    }
1629
1630    #[inline]
1631    unsafe fn advance_mut(&mut self, cnt: usize) {
1632        let new_len = self.len() + cnt;
1633
1634        // This call will panic if `cnt` is too big
1635        self.inner.set_len(new_len);
1636    }
1637
1638    #[inline]
1639    fn chunk_mut(&mut self) -> &mut UninitSlice {
1640        let len = self.len();
1641
1642        unsafe {
1643            // This will never panic as `len` can never become invalid
1644            let ptr = &mut self.inner.as_raw()[len..];
1645
1646            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
1647        }
1648    }
1649
1650    #[inline]
1651    fn put_slice(&mut self, src: &[u8]) {
1652        let len = src.len();
1653        self.reserve(len);
1654
1655        unsafe {
1656            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
1657            self.advance_mut(len);
1658        }
1659    }
1660
1661    #[inline]
1662    fn put_u8(&mut self, n: u8) {
1663        self.reserve(1);
1664        self.inner.put_u8(n);
1665    }
1666
1667    #[inline]
1668    fn put_i8(&mut self, n: i8) {
1669        self.reserve(1);
1670        self.put_u8(n as u8);
1671    }
1672}
1673
1674impl bytes::buf::Buf for BytesMut {
1675    #[inline]
1676    fn remaining(&self) -> usize {
1677        self.len()
1678    }
1679
1680    #[inline]
1681    fn chunk(&self) -> &[u8] {
1682        self.inner.as_ref()
1683    }
1684
1685    #[inline]
1686    fn advance(&mut self, cnt: usize) {
1687        Buf::advance(self, cnt)
1688    }
1689}
1690
1691unsafe impl bytes::buf::BufMut for BytesMut {
1692    #[inline]
1693    fn remaining_mut(&self) -> usize {
1694        BufMut::remaining_mut(self)
1695    }
1696
1697    #[inline]
1698    unsafe fn advance_mut(&mut self, cnt: usize) {
1699        BufMut::advance_mut(self, cnt)
1700    }
1701
1702    #[inline]
1703    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
1704        let len = self.len();
1705        unsafe {
1706            // This will never panic as `len` can never become invalid
1707            let ptr = &mut self.inner.as_raw()[len..];
1708            bytes::buf::UninitSlice::from_raw_parts_mut(
1709                ptr.as_mut_ptr(),
1710                self.capacity() - len,
1711            )
1712        }
1713    }
1714
1715    #[inline]
1716    fn put_slice(&mut self, src: &[u8]) {
1717        BufMut::put_slice(self, src)
1718    }
1719
1720    #[inline]
1721    fn put_u8(&mut self, n: u8) {
1722        BufMut::put_u8(self, n)
1723    }
1724
1725    #[inline]
1726    fn put_i8(&mut self, n: i8) {
1727        BufMut::put_i8(self, n)
1728    }
1729}
1730
1731impl AsRef<[u8]> for BytesMut {
1732    #[inline]
1733    fn as_ref(&self) -> &[u8] {
1734        self.inner.as_ref()
1735    }
1736}
1737
1738impl AsMut<[u8]> for BytesMut {
1739    #[inline]
1740    fn as_mut(&mut self) -> &mut [u8] {
1741        self.inner.as_mut()
1742    }
1743}
1744
1745impl Deref for BytesMut {
1746    type Target = [u8];
1747
1748    #[inline]
1749    fn deref(&self) -> &[u8] {
1750        self.as_ref()
1751    }
1752}
1753
1754impl DerefMut for BytesMut {
1755    #[inline]
1756    fn deref_mut(&mut self) -> &mut [u8] {
1757        self.inner.as_mut()
1758    }
1759}
1760
1761impl From<Vec<u8>> for BytesMut {
1762    #[inline]
1763    /// Convert a `Vec` into a `BytesMut`
1764    ///
    /// A `BytesMut` constructed this way always stores its data on the heap,
    /// in the default memory pool.
1768    fn from(src: Vec<u8>) -> BytesMut {
1769        BytesMut::from_vec(src, PoolId::DEFAULT.pool_ref())
1770    }
1771}
1772
1773impl From<String> for BytesMut {
1774    #[inline]
1775    fn from(src: String) -> BytesMut {
1776        BytesMut::from_vec(src.into_bytes(), PoolId::DEFAULT.pool_ref())
1777    }
1778}
1779
1780impl From<&BytesMut> for BytesMut {
1781    #[inline]
1782    fn from(src: &BytesMut) -> BytesMut {
1783        src.clone()
1784    }
1785}
1786
1787impl<'a> From<&'a [u8]> for BytesMut {
1788    fn from(src: &'a [u8]) -> BytesMut {
1789        if src.is_empty() {
1790            BytesMut::new()
1791        } else {
1792            BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1793        }
1794    }
1795}
1796
1797impl<const N: usize> From<[u8; N]> for BytesMut {
1798    fn from(src: [u8; N]) -> BytesMut {
1799        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1800    }
1801}
1802
1803impl<'a, const N: usize> From<&'a [u8; N]> for BytesMut {
1804    fn from(src: &'a [u8; N]) -> BytesMut {
1805        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1806    }
1807}
1808
1809impl<'a> From<&'a str> for BytesMut {
1810    #[inline]
1811    fn from(src: &'a str) -> BytesMut {
1812        BytesMut::from(src.as_bytes())
1813    }
1814}
1815
1816impl From<Bytes> for BytesMut {
1817    #[inline]
1818    fn from(src: Bytes) -> BytesMut {
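        // Reuse the underlying buffer when `try_mut` succeeds; otherwise copy
        // the bytes into a new buffer allocated from the same pool.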
1819        src.try_mut()
1820            .unwrap_or_else(|src| BytesMut::copy_from_slice_in(&src[..], src.inner.pool()))
1821    }
1822}
1823
1824impl From<&Bytes> for BytesMut {
1825    #[inline]
1826    fn from(src: &Bytes) -> BytesMut {
1827        BytesMut::copy_from_slice_in(&src[..], src.inner.pool())
1828    }
1829}
1830
1831impl Eq for BytesMut {}
1832
1833impl PartialEq for BytesMut {
1834    #[inline]
1835    fn eq(&self, other: &BytesMut) -> bool {
1836        self.inner.as_ref() == other.inner.as_ref()
1837    }
1838}
1839
1840impl Default for BytesMut {
1841    #[inline]
1842    fn default() -> BytesMut {
1843        BytesMut::new()
1844    }
1845}
1846
1847impl Borrow<[u8]> for BytesMut {
1848    #[inline]
1849    fn borrow(&self) -> &[u8] {
1850        self.as_ref()
1851    }
1852}
1853
1854impl BorrowMut<[u8]> for BytesMut {
1855    #[inline]
1856    fn borrow_mut(&mut self) -> &mut [u8] {
1857        self.as_mut()
1858    }
1859}
1860
1861impl fmt::Debug for BytesMut {
1862    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1863        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1864    }
1865}
1866
1867impl fmt::Write for BytesMut {
1868    #[inline]
1869    fn write_str(&mut self, s: &str) -> fmt::Result {
1870        if self.remaining_mut() >= s.len() {
1871            self.put_slice(s.as_bytes());
1872            Ok(())
1873        } else {
1874            Err(fmt::Error)
1875        }
1876    }
1877
1878    #[inline]
1879    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1880        fmt::write(self, args)
1881    }
1882}
1883
1884impl Clone for BytesMut {
1885    #[inline]
1886    fn clone(&self) -> BytesMut {
1887        BytesMut::from(&self[..])
1888    }
1889}
1890
1891impl IntoIterator for BytesMut {
1892    type Item = u8;
1893    type IntoIter = IntoIter<BytesMut>;
1894
1895    fn into_iter(self) -> Self::IntoIter {
1896        IntoIter::new(self)
1897    }
1898}
1899
1900impl<'a> IntoIterator for &'a BytesMut {
1901    type Item = &'a u8;
1902    type IntoIter = std::slice::Iter<'a, u8>;
1903
1904    fn into_iter(self) -> Self::IntoIter {
1905        self.as_ref().iter()
1906    }
1907}
1908
1909impl FromIterator<u8> for BytesMut {
1910    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1911        let iter = into_iter.into_iter();
1912        let (min, maybe_max) = iter.size_hint();
1913
1914        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
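        // `reserve(1)` returns early while capacity is sufficient; it only
        // grows the buffer when the iterator yields more items than hinted.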
1915        for i in iter {
1916            out.reserve(1);
1917            out.put_u8(i);
1918        }
1919
1920        out
1921    }
1922}
1923
1924impl<'a> FromIterator<&'a u8> for BytesMut {
1925    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1926        into_iter.into_iter().copied().collect::<BytesMut>()
1927    }
1928}
1929
1930impl Extend<u8> for BytesMut {
1931    fn extend<T>(&mut self, iter: T)
1932    where
1933        T: IntoIterator<Item = u8>,
1934    {
1935        let iter = iter.into_iter();
1936
1937        let (lower, _) = iter.size_hint();
1938        self.reserve(lower);
1939
1940        for b in iter {
1941            self.put_u8(b);
1942        }
1943    }
1944}
1945
1946impl<'a> Extend<&'a u8> for BytesMut {
1947    fn extend<T>(&mut self, iter: T)
1948    where
1949        T: IntoIterator<Item = &'a u8>,
1950    {
1951        self.extend(iter.into_iter().copied())
1952    }
1953}
1954
1955/*
1956 *
1957 * ===== BytesVec =====
1958 *
1959 */
1960
1961impl BytesVec {
1962    /// Creates a new `BytesVec` with the specified capacity.
1963    ///
1964    /// The returned `BytesVec` will be able to hold at least `capacity` bytes
1965    /// without reallocating.
1966    ///
1967    /// It is important to note that this function does not specify the length
1968    /// of the returned `BytesVec`, but only the capacity.
1969    ///
1970    /// # Panics
1971    ///
1972    /// Panics if `capacity` does not fit into 60 bits on 64-bit systems
1973    /// (28 bits on 32-bit systems)
1974    ///
1975    /// # Examples
1976    ///
1977    /// ```
1978    /// use ntex_bytes::{BytesVec, BufMut};
1979    ///
1980    /// let mut bytes = BytesVec::with_capacity(64);
1981    ///
1982    /// // `bytes` contains no data, even though there is capacity
1983    /// assert_eq!(bytes.len(), 0);
1984    ///
1985    /// bytes.put(&b"hello world"[..]);
1986    ///
1987    /// assert_eq!(&bytes[..], b"hello world");
1988    /// ```
1989    #[inline]
1990    pub fn with_capacity(capacity: usize) -> BytesVec {
1991        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1992    }
1993
1994    /// Creates a new `BytesVec` with the specified capacity and in specified memory pool.
1995    ///
1996    /// # Examples
1997    ///
1998    /// ```
1999    /// use ntex_bytes::{BytesVec, BufMut, PoolId};
2000    ///
2001    /// let mut bytes = BytesVec::with_capacity_in(64, PoolId::P1);
2002    ///
2003    /// // `bytes` contains no data, even though there is capacity
2004    /// assert_eq!(bytes.len(), 0);
2005    ///
2006    /// bytes.put(&b"hello world"[..]);
2007    ///
2008    /// assert_eq!(&bytes[..], b"hello world");
2009    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
2010    /// ```
2011    #[inline]
2012    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesVec
2013    where
2014        PoolRef: From<T>,
2015    {
2016        BytesVec {
2017            inner: InnerVec::with_capacity(capacity, pool.into()),
2018        }
2019    }
2020
2021    /// Creates a new `BytesVec` from a slice, by copying it.
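    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let buf = BytesVec::copy_from_slice(&b"data"[..]);
    /// assert_eq!(&buf[..], b"data");
    /// ```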
2022    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
2023        Self::copy_from_slice_in(src, PoolId::DEFAULT)
2024    }
2025
2026    /// Creates a new `BytesVec` from a slice, by copying it into the specified memory pool.
2027    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
2028    where
2029        T: AsRef<[u8]>,
2030        PoolRef: From<U>,
2031    {
2032        let s = src.as_ref();
2033        BytesVec {
2034            inner: InnerVec::from_slice(s.len(), s, pool.into()),
2035        }
2036    }
2037
2038    /// Creates a new `BytesVec` with default capacity.
2039    ///
2040    /// The resulting object has a length of 0 and a small default capacity.
2042    ///
2043    /// # Examples
2044    ///
2045    /// ```
2046    /// use ntex_bytes::{BytesVec, BufMut};
2047    ///
2048    /// let mut bytes = BytesVec::new();
2049    ///
2050    /// assert_eq!(0, bytes.len());
2051    ///
2052    /// bytes.reserve(2);
2053    /// bytes.put_slice(b"xy");
2054    ///
2055    /// assert_eq!(&b"xy"[..], &bytes[..]);
2056    /// ```
2057    #[inline]
2058    pub fn new() -> BytesVec {
2059        BytesVec::with_capacity(MIN_NON_ZERO_CAP)
2060    }
2061
2062    /// Returns the number of bytes contained in this `BytesVec`.
2063    ///
2064    /// # Examples
2065    ///
2066    /// ```
2067    /// use ntex_bytes::BytesVec;
2068    ///
2069    /// let b = BytesVec::copy_from_slice(&b"hello"[..]);
2070    /// assert_eq!(b.len(), 5);
2071    /// ```
2072    #[inline]
2073    pub fn len(&self) -> usize {
2074        self.inner.len()
2075    }
2076
2077    /// Returns true if the `BytesVec` has a length of 0.
2078    ///
2079    /// # Examples
2080    ///
2081    /// ```
2082    /// use ntex_bytes::BytesVec;
2083    ///
2084    /// let b = BytesVec::with_capacity(64);
2085    /// assert!(b.is_empty());
2086    /// ```
2087    #[inline]
2088    pub fn is_empty(&self) -> bool {
2089        self.inner.len() == 0
2090    }
2091
2092    /// Returns the number of bytes the `BytesVec` can hold without reallocating.
2093    ///
2094    /// # Examples
2095    ///
2096    /// ```
2097    /// use ntex_bytes::BytesVec;
2098    ///
2099    /// let b = BytesVec::with_capacity(64);
2100    /// assert_eq!(b.capacity(), 64);
2101    /// ```
2102    #[inline]
2103    pub fn capacity(&self) -> usize {
2104        self.inner.capacity()
2105    }
2106
2107    /// Converts `self` into an immutable `Bytes`.
2108    ///
2109    /// The conversion is zero cost and is used to indicate that the slice
2110    /// referenced by the handle will no longer be mutated. Once the conversion
2111    /// is done, the handle can be cloned and shared across threads.
2112    ///
2113    /// # Examples
2114    ///
2115    /// ```
2116    /// use ntex_bytes::{BytesVec, BufMut};
2117    /// use std::thread;
2118    ///
2119    /// let mut b = BytesVec::with_capacity(64);
2120    /// b.put("hello world");
2121    /// let b1 = b.freeze();
2122    /// let b2 = b1.clone();
2123    ///
2124    /// let th = thread::spawn(move || {
2125    ///     assert_eq!(b1, b"hello world");
2126    /// });
2127    ///
2128    /// assert_eq!(b2, b"hello world");
2129    /// th.join().unwrap();
2130    /// ```
2131    #[inline]
2132    pub fn freeze(self) -> Bytes {
2133        Bytes {
2134            inner: self.inner.into_inner(),
2135        }
2136    }
2137
2138    /// Removes the bytes from the current view, returning them in a new
2139    /// `BytesMut` instance.
2140    ///
2141    /// Afterwards, `self` will be empty, but will retain any additional
2142    /// capacity that it had before the operation. This is identical to
2143    /// `self.split_to(self.len())`.
2144    ///
2145    /// This is an `O(1)` operation that just increases the reference count and
2146    /// sets a few indices.
2147    ///
2148    /// # Examples
2149    ///
2150    /// ```
2151    /// use ntex_bytes::{BytesVec, BufMut};
2152    ///
2153    /// let mut buf = BytesVec::with_capacity(1024);
2154    /// buf.put(&b"hello world"[..]);
2155    ///
2156    /// let other = buf.split();
2157    ///
2158    /// assert!(buf.is_empty());
2159    /// assert_eq!(1013, buf.capacity());
2160    ///
2161    /// assert_eq!(other, b"hello world"[..]);
2162    /// ```
2163    pub fn split(&mut self) -> BytesMut {
2164        self.split_to(self.len())
2165    }
2166
2167    /// Splits the buffer into two at the given index.
2168    ///
2169    /// Afterwards `self` contains elements `[at, len)`, and the returned
2170    /// `BytesMut` contains elements `[0, at)`.
2171    ///
2172    /// This is an `O(1)` operation that just increases the reference count and
2173    /// sets a few indices.
2174    ///
2175    /// # Examples
2176    ///
2177    /// ```
2178    /// use ntex_bytes::BytesVec;
2179    ///
2180    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
2181    /// let mut b = a.split_to(5);
2182    ///
2183    /// a[0] = b'!';
2184    ///
2185    /// assert_eq!(&a[..], b"!world");
2186    /// assert_eq!(&b[..], b"hello");
2187    /// ```
2188    ///
2189    /// # Panics
2190    ///
2191    /// Panics if `at > len`.
2192    pub fn split_to(&mut self, at: usize) -> BytesMut {
2193        self.split_to_checked(at)
2194            .expect("`at` value must be <= `self.len()`")
2195    }
2196
2197    /// Splits the bytes into two at the given index.
2198    ///
2199    /// Returns `None` if `at > len`.
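    ///
    /// # Examples
    ///
    /// A minimal sketch of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
    /// let b = a.split_to_checked(5).unwrap();
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// assert!(a.split_to_checked(100).is_none());
    /// ```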
2200    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
2201        if at <= self.len() {
2202            Some(BytesMut {
2203                inner: self.inner.split_to(at, false),
2204            })
2205        } else {
2206            None
2207        }
2208    }
2209
2210    /// Shortens the buffer, keeping the first `len` bytes and dropping the
2211    /// rest.
2212    ///
2213    /// If `len` is greater than the buffer's current length, this has no
2214    /// effect.
2215    ///
2216    /// The [`split_off`] method can emulate `truncate`, but this causes the
2217    /// excess bytes to be returned instead of dropped.
2218    ///
2219    /// # Examples
2220    ///
2221    /// ```
2222    /// use ntex_bytes::BytesVec;
2223    ///
2224    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2225    /// buf.truncate(5);
2226    /// assert_eq!(buf, b"hello"[..]);
2227    /// ```
2228    ///
2229    /// [`split_off`]: #method.split_off
2230    pub fn truncate(&mut self, len: usize) {
2231        self.inner.truncate(len);
2232    }
2233
2234    /// Clears the buffer, removing all data.
2235    ///
2236    /// # Examples
2237    ///
2238    /// ```
2239    /// use ntex_bytes::BytesVec;
2240    ///
2241    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2242    /// buf.clear();
2243    /// assert!(buf.is_empty());
2244    /// ```
2245    pub fn clear(&mut self) {
2246        self.truncate(0);
2247    }
2248
2249    /// Resizes the buffer so that `len` is equal to `new_len`.
2250    ///
2251    /// If `new_len` is greater than `len`, the buffer is extended by the
2252    /// difference with each additional byte set to `value`. If `new_len` is
2253    /// less than `len`, the buffer is simply truncated.
2254    ///
2255    /// # Panics
2256    ///
2257    /// Panics if `new_len` does not fit into 60 bits on 64-bit systems
2258    /// (28 bits on 32-bit systems)
2259    ///
2260    /// # Examples
2261    ///
2262    /// ```
2263    /// use ntex_bytes::BytesVec;
2264    ///
2265    /// let mut buf = BytesVec::new();
2266    ///
2267    /// buf.resize(3, 0x1);
2268    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
2269    ///
2270    /// buf.resize(2, 0x2);
2271    /// assert_eq!(&buf[..], &[0x1, 0x1]);
2272    ///
2273    /// buf.resize(4, 0x3);
2274    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
2275    /// ```
2276    #[inline]
2277    pub fn resize(&mut self, new_len: usize, value: u8) {
2278        self.inner.resize(new_len, value);
2279    }
2280
2281    /// Sets the length of the buffer.
2282    ///
2283    /// This will explicitly set the size of the buffer without actually
2284    /// modifying the data, so it is up to the caller to ensure that the data
2285    /// has been initialized.
2286    ///
2287    /// # Examples
2288    ///
2289    /// ```
2290    /// use ntex_bytes::BytesVec;
2291    ///
2292    /// let mut b = BytesVec::copy_from_slice(&b"hello world"[..]);
2293    ///
2294    /// unsafe {
2295    ///     b.set_len(5);
2296    /// }
2297    ///
2298    /// assert_eq!(&b[..], b"hello");
2299    ///
2300    /// unsafe {
2301    ///     b.set_len(11);
2302    /// }
2303    ///
2304    /// assert_eq!(&b[..], b"hello world");
2305    /// ```
2306    ///
2307    /// # Panics
2308    ///
2309    /// This method will panic if `len` is out of bounds for the underlying
2310    /// slice or if it comes after the `end` of the configured window.
2311    #[inline]
2312    #[allow(clippy::missing_safety_doc)]
2313    pub unsafe fn set_len(&mut self, len: usize) {
2314        self.inner.set_len(len)
2315    }
2316
2317    /// Reserves capacity for at least `additional` more bytes to be inserted
2318    /// into the given `BytesVec`.
2319    ///
2320    /// More than `additional` bytes may be reserved in order to avoid frequent
2321    /// reallocations. A call to `reserve` may result in an allocation.
2322    ///
2323    /// Before allocating new buffer space, the function will attempt to reclaim
2324    /// space in the existing buffer. If the current handle references a small
2325    /// view in the original buffer and all other handles have been dropped,
2326    /// and the requested capacity is less than or equal to the existing
2327    /// buffer's capacity, then the current view will be copied to the front of
2328    /// the buffer and the handle will take ownership of the full buffer.
2329    ///
2330    /// # Panics
2331    ///
2332    /// Panics if the new capacity does not fit into 60 bits on 64-bit systems
2333    /// (28 bits on 32-bit systems), or if it overflows `usize`.
2334    ///
2335    /// # Examples
2336    ///
2337    /// In the following example, a new buffer is allocated.
2338    ///
2339    /// ```
2340    /// use ntex_bytes::BytesVec;
2341    ///
2342    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
2343    /// buf.reserve(64);
2344    /// assert!(buf.capacity() >= 69);
2345    /// ```
2346    ///
2347    /// In the following example, the existing buffer is reclaimed.
2348    ///
2349    /// ```
2350    /// use ntex_bytes::{BytesVec, BufMut};
2351    ///
2352    /// let mut buf = BytesVec::with_capacity(128);
2353    /// buf.put(&[0; 64][..]);
2354    ///
2355    /// let ptr = buf.as_ptr();
2356    /// let other = buf.split();
2357    ///
2358    /// assert!(buf.is_empty());
2359    /// assert_eq!(buf.capacity(), 64);
2360    ///
2361    /// drop(other);
2362    /// buf.reserve(128);
2363    ///
2364    /// assert_eq!(buf.capacity(), 128);
2365    /// assert_eq!(buf.as_ptr(), ptr);
2366    /// ```
2371    #[inline]
2372    pub fn reserve(&mut self, additional: usize) {
2373        let len = self.len();
2374        let rem = self.capacity() - len;
2375
2376        if additional <= rem {
2377            // The handle can already store at least `additional` more bytes, so
2378            // there is no further work needed to be done.
2379            return;
2380        }
2381
2382        self.inner.reserve_inner(additional);
2383    }
2384
2385    /// Appends given bytes to this object.
2386    ///
2387    /// If this `BytesVec` object does not have enough capacity, it is resized
2388    /// first, so this method never panics due to insufficient capacity.
2389    ///
2390    /// # Examples
2391    ///
2392    /// ```
2393    /// use ntex_bytes::BytesVec;
2394    ///
2395    /// let mut buf = BytesVec::with_capacity(0);
2396    /// buf.extend_from_slice(b"aaabbb");
2397    /// buf.extend_from_slice(b"cccddd");
2398    ///
2399    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
2400    /// ```
2401    #[inline]
2402    pub fn extend_from_slice(&mut self, extend: &[u8]) {
2403        self.put_slice(extend);
2404    }
2405
2406    /// Runs the provided function with a `BytesMut` instance that contains the current data.
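    ///
    /// # Examples
    ///
    /// A minimal sketch; the buffer is handed to the closure as a `BytesMut`
    /// and converted back when the closure returns:
    ///
    /// ```
    /// use ntex_bytes::{BytesVec, BufMut};
    ///
    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// let len = buf.with_bytes_mut(|b| {
    ///     b.put_slice(b" world");
    ///     b.len()
    /// });
    ///
    /// assert_eq!(len, 11);
    /// assert_eq!(&buf[..], b"hello world");
    /// ```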
2407    #[inline]
2408    pub fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2409    where
2410        F: FnOnce(&mut BytesMut) -> R,
2411    {
2412        self.inner.with_bytes_mut(f)
2413    }
2414
2415    /// Returns an iterator over the bytes contained by the buffer.
2416    ///
2417    /// # Examples
2418    ///
2419    /// ```
2420    /// use ntex_bytes::{Buf, BytesVec};
2421    ///
2422    /// let buf = BytesVec::copy_from_slice(&b"abc"[..]);
2423    /// let mut iter = buf.iter();
2424    ///
2425    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
2426    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
2427    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
2428    /// assert_eq!(iter.next(), None);
2429    /// ```
2430    #[inline]
2431    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
2432        self.chunk().iter()
2433    }
2434
2435    #[cfg(feature = "mpool")]
2436    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
2437        self.inner.move_to_pool(pool);
2438    }
2439}
2440
2441impl Buf for BytesVec {
2442    #[inline]
2443    fn remaining(&self) -> usize {
2444        self.len()
2445    }
2446
2447    #[inline]
2448    fn chunk(&self) -> &[u8] {
2449        self.inner.as_ref()
2450    }
2451
2452    #[inline]
2453    fn advance(&mut self, cnt: usize) {
2454        assert!(
2455            cnt <= self.inner.as_ref().len(),
2456            "cannot advance past `remaining`"
2457        );
2458        unsafe {
2459            self.inner.set_start(cnt as u32);
2460        }
2461    }
2462}
2463
2464impl BufMut for BytesVec {
2465    #[inline]
2466    fn remaining_mut(&self) -> usize {
2467        self.capacity() - self.len()
2468    }
2469
2470    #[inline]
2471    unsafe fn advance_mut(&mut self, cnt: usize) {
2472        let new_len = self.len() + cnt;
2473
2474        // This call will panic if `cnt` is too big
2475        self.inner.set_len(new_len);
2476    }
2477
2478    #[inline]
2479    fn chunk_mut(&mut self) -> &mut UninitSlice {
2480        let len = self.len();
2481
2482        unsafe {
2483            // This will never panic as `len` can never become invalid
2484            let ptr = &mut self.inner.as_raw()[len..];
2485
2486            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
2487        }
2488    }
2489
2490    #[inline]
2491    fn put_slice(&mut self, src: &[u8]) {
2492        let len = src.len();
2493        self.reserve(len);
2494
2495        unsafe {
2496            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
2497            self.advance_mut(len);
2498        }
2499    }
2500
2501    #[inline]
2502    fn put_u8(&mut self, n: u8) {
2503        self.reserve(1);
2504        self.inner.put_u8(n);
2505    }
2506
2507    #[inline]
2508    fn put_i8(&mut self, n: i8) {
2509        self.reserve(1);
2510        self.put_u8(n as u8);
2511    }
2512}
2513
2514impl AsRef<[u8]> for BytesVec {
2515    #[inline]
2516    fn as_ref(&self) -> &[u8] {
2517        self.inner.as_ref()
2518    }
2519}
2520
2521impl AsMut<[u8]> for BytesVec {
2522    #[inline]
2523    fn as_mut(&mut self) -> &mut [u8] {
2524        self.inner.as_mut()
2525    }
2526}
2527
2528impl Deref for BytesVec {
2529    type Target = [u8];
2530
2531    #[inline]
2532    fn deref(&self) -> &[u8] {
2533        self.as_ref()
2534    }
2535}
2536
2537impl DerefMut for BytesVec {
2538    #[inline]
2539    fn deref_mut(&mut self) -> &mut [u8] {
2540        self.inner.as_mut()
2541    }
2542}
2543
2544impl Eq for BytesVec {}
2545
2546impl PartialEq for BytesVec {
2547    #[inline]
2548    fn eq(&self, other: &BytesVec) -> bool {
2549        self.inner.as_ref() == other.inner.as_ref()
2550    }
2551}
2552
2553impl Default for BytesVec {
2554    #[inline]
2555    fn default() -> BytesVec {
2556        BytesVec::new()
2557    }
2558}
2559
2560impl Borrow<[u8]> for BytesVec {
2561    #[inline]
2562    fn borrow(&self) -> &[u8] {
2563        self.as_ref()
2564    }
2565}
2566
2567impl BorrowMut<[u8]> for BytesVec {
2568    #[inline]
2569    fn borrow_mut(&mut self) -> &mut [u8] {
2570        self.as_mut()
2571    }
2572}
2573
2574impl fmt::Debug for BytesVec {
2575    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2576        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
2577    }
2578}
2579
2580impl fmt::Write for BytesVec {
2581    #[inline]
2582    fn write_str(&mut self, s: &str) -> fmt::Result {
2583        if self.remaining_mut() >= s.len() {
2584            self.put_slice(s.as_bytes());
2585            Ok(())
2586        } else {
2587            Err(fmt::Error)
2588        }
2589    }
2590
2591    #[inline]
2592    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
2593        fmt::write(self, args)
2594    }
2595}
2596
2597impl IntoIterator for BytesVec {
2598    type Item = u8;
2599    type IntoIter = IntoIter<BytesVec>;
2600
2601    fn into_iter(self) -> Self::IntoIter {
2602        IntoIter::new(self)
2603    }
2604}
2605
2606impl<'a> IntoIterator for &'a BytesVec {
2607    type Item = &'a u8;
2608    type IntoIter = std::slice::Iter<'a, u8>;
2609
2610    fn into_iter(self) -> Self::IntoIter {
2611        self.as_ref().iter()
2612    }
2613}
2614
2615impl FromIterator<u8> for BytesVec {
2616    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
2617        let iter = into_iter.into_iter();
2618        let (min, maybe_max) = iter.size_hint();
2619
2620        let mut out = BytesVec::with_capacity(maybe_max.unwrap_or(min));
2621        for i in iter {
2622            out.reserve(1);
2623            out.put_u8(i);
2624        }
2625
2626        out
2627    }
2628}
2629
2630impl<'a> FromIterator<&'a u8> for BytesVec {
2631    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
2632        into_iter.into_iter().copied().collect::<BytesVec>()
2633    }
2634}
2635
2636impl Extend<u8> for BytesVec {
2637    fn extend<T>(&mut self, iter: T)
2638    where
2639        T: IntoIterator<Item = u8>,
2640    {
2641        let iter = iter.into_iter();
2642
2643        let (lower, _) = iter.size_hint();
2644        self.reserve(lower);
2645
2646        for b in iter {
2647            self.put_u8(b);
2648        }
2649    }
2650}
2651
2652impl<'a> Extend<&'a u8> for BytesVec {
2653    fn extend<T>(&mut self, iter: T)
2654    where
2655        T: IntoIterator<Item = &'a u8>,
2656    {
2657        self.extend(iter.into_iter().copied())
2658    }
2659}
2660
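/// Handle to a `SharedVec`-backed buffer: the allocation starts with a
/// `SharedVec` header (length, capacity, pool and ref count) immediately
/// followed by the data bytes; the header's `offset` field records where the
/// visible data begins.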
2661struct InnerVec(NonNull<SharedVec>);
2662
2663impl InnerVec {
2664    #[inline]
2665    fn with_capacity(capacity: usize, pool: PoolRef) -> InnerVec {
2666        Self::from_slice(capacity, &[], pool)
2667    }
2668
2669    #[inline]
2670    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> InnerVec {
2671        // vec must be aligned to SharedVec instead of u8
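        // (one slot holds the `SharedVec` header itself; an extra slot covers
        // the remainder when `cap` is not a multiple of `SHARED_VEC_SIZE`)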
2672        let vec_cap = if cap % SHARED_VEC_SIZE != 0 {
2673            (cap / SHARED_VEC_SIZE) + 2
2674        } else {
2675            (cap / SHARED_VEC_SIZE) + 1
2676        };
2677        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
2678        unsafe {
2679            // Store data in vec
2680            let len = src.len() as u32;
2681            let cap = vec.capacity() * SHARED_VEC_SIZE;
2682            let shared_ptr = vec.as_mut_ptr();
2683            mem::forget(vec);
2684            pool.acquire(cap);
2685
2686            let ptr = shared_ptr.add(1) as *mut u8;
2687            if !src.is_empty() {
2688                ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
2689            }
2690            ptr::write(
2691                shared_ptr,
2692                SharedVec {
2693                    len,
2694                    cap,
2695                    pool,
2696                    ref_count: AtomicUsize::new(1),
2697                    offset: SHARED_VEC_SIZE as u32,
2698                },
2699            );
2700
2701            InnerVec(NonNull::new_unchecked(shared_ptr))
2702        }
2703    }
2704
2705    #[cfg(feature = "mpool")]
2706    #[inline]
2707    fn move_to_pool(&mut self, pool: PoolRef) {
2708        unsafe {
2709            let inner = self.as_inner();
2710            if pool != inner.pool {
2711                pool.acquire(inner.cap);
2712                let pool = mem::replace(&mut inner.pool, pool);
2713                pool.release(inner.cap);
2714            }
2715        }
2716    }
2717
2718    /// Return a slice for the handle's view into the shared buffer
2719    #[inline]
2720    fn as_ref(&self) -> &[u8] {
2721        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
2722    }
2723
2724    /// Return a mutable slice for the handle's view into the shared buffer
2725    #[inline]
2726    fn as_mut(&mut self) -> &mut [u8] {
2727        unsafe { slice::from_raw_parts_mut(self.as_ptr(), self.len()) }
2728    }
2729
2730    /// Return a mutable slice for the handle's view into the shared buffer
2731    /// including potentially uninitialized bytes.
2732    #[inline]
2733    unsafe fn as_raw(&mut self) -> &mut [u8] {
2734        slice::from_raw_parts_mut(self.as_ptr(), self.capacity())
2735    }
2736
2737    /// Return a raw pointer to data
2738    #[inline]
2739    unsafe fn as_ptr(&self) -> *mut u8 {
2740        (self.0.as_ptr() as *mut u8).add((*self.0.as_ptr()).offset as usize)
2741    }
2742
2743    #[inline]
2744    unsafe fn as_inner(&mut self) -> &mut SharedVec {
2745        self.0.as_mut()
2746    }
2747
2748    /// Insert a byte into the next slot and advance the len by 1.
2749    #[inline]
2750    fn put_u8(&mut self, n: u8) {
2751        unsafe {
2752            let inner = self.as_inner();
2753            let len = inner.len as usize;
2754            assert!(len < (inner.cap - inner.offset as usize));
2755            inner.len += 1;
2756            *self.as_ptr().add(len) = n;
2757        }
2758    }
2759
2760    #[inline]
2761    fn len(&self) -> usize {
2762        unsafe { (*self.0.as_ptr()).len as usize }
2763    }
2764
2765    /// Sets the length of the handle's view into the shared slice.
2766    #[inline]
2767    unsafe fn set_len(&mut self, len: usize) {
2768        let inner = self.as_inner();
2769        assert!(len <= (inner.cap - inner.offset as usize) && len < u32::MAX as usize);
2770        inner.len = len as u32;
2771    }
2772
2773    #[inline]
2774    fn capacity(&self) -> usize {
2775        unsafe { (*self.0.as_ptr()).cap - (*self.0.as_ptr()).offset as usize }
2776    }
2777
2778    fn into_inner(mut self) -> Inner {
2779        unsafe {
2780            let ptr = self.as_ptr();
2781
2782            if self.len() <= INLINE_CAP {
2783                Inner::from_ptr_inline(ptr, self.len())
2784            } else {
2785                let inner = self.as_inner();
2786
2787                let inner = Inner {
2788                    ptr,
2789                    len: inner.len as usize,
2790                    cap: inner.cap - inner.offset as usize,
2791                    arc: NonNull::new_unchecked(
2792                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2793                    ),
2794                };
2795                mem::forget(self);
2796                inner
2797            }
2798        }
2799    }
2800
2801    fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2802    where
2803        F: FnOnce(&mut BytesMut) -> R,
2804    {
2805        unsafe {
2806            // create Inner for BytesMut
2807            let ptr = self.as_ptr();
2808            let inner = self.as_inner();
2809            let inner = Inner {
2810                ptr,
2811                len: inner.len as usize,
2812                cap: inner.cap - inner.offset as usize,
2813                arc: NonNull::new_unchecked(
2814                    (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2815                ),
2816            };
2817
2818            // run function
2819            let mut buf = BytesMut { inner };
2820            let result = f(&mut buf);
2821
2822            // convert BytesMut back to InnerVec
2823            let kind = buf.inner.kind();
2824            let new_inner =
2825                // only KIND_VEC can be converted back into `InnerVec`; otherwise the data has to be copied
2826                if kind == KIND_INLINE || kind == KIND_STATIC || kind == KIND_ARC {
2827                    InnerVec::from_slice(
2828                        buf.inner.capacity(),
2829                        buf.inner.as_ref(),
2830                        buf.inner.pool(),
2831                    )
2832                } else if kind == KIND_VEC {
2833                    let ptr = buf.inner.shared_vec();
2834                    let offset = buf.inner.ptr as usize - ptr as usize;
2835
2836                    // the shared vec cannot be reused if the `BytesMut` points into the middle of it
2837                    if buf.inner.cap < (*ptr).cap - offset {
2838                        InnerVec::from_slice(
2839                            buf.inner.capacity(),
2840                            buf.inner.as_ref(),
2841                            buf.inner.pool(),
2842                        )
2843                    } else {
2844                        // BytesMut owns rest of the vec, so re-use
2845                        (*ptr).len = buf.len() as u32;
2846                        (*ptr).offset = offset as u32;
2847                        let inner = InnerVec(NonNull::new_unchecked(ptr));
2848                        mem::forget(buf); // reuse bytes
2849                        inner
2850                    }
2851                } else {
2852                    panic!()
2853                };
2854
2855            // forget the old inner; dropping it would release the shared buffer a second time
2856            let old = mem::replace(self, new_inner);
2857            mem::forget(old);
2858
2859            result
2860        }
2861    }
2862
2863    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
2864        unsafe {
2865            let ptr = self.as_ptr();
2866
2867            let other = if create_inline && at <= INLINE_CAP {
2868                Inner::from_ptr_inline(ptr, at)
2869            } else {
2870                let inner = self.as_inner();
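                // bump the shared ref count for the new handle; abort on
                // overflow, mirroring `Arc`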
2871                let old_size = inner.ref_count.fetch_add(1, Relaxed);
2872                if old_size == usize::MAX {
2873                    abort();
2874                }
2875
2876                Inner {
2877                    ptr,
2878                    len: at,
2879                    cap: at,
2880                    arc: NonNull::new_unchecked(
2881                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2882                    ),
2883                }
2884            };
2885            self.set_start(at as u32);
2886
2887            other
2888        }
2889    }
2890
2891    fn truncate(&mut self, len: usize) {
2892        unsafe {
2893            // try to reclaim the buffer. This is possible if the current
2894            // handle is the only outstanding handle pointing to the buffer.
2895            if len == 0 {
2896                let inner = self.as_inner();
2897                if inner.is_unique() && inner.offset != SHARED_VEC_SIZE as u32 {
2898                    inner.offset = SHARED_VEC_SIZE as u32;
2899                }
2900            }
2901
2902            if len <= self.len() {
2903                self.set_len(len);
2904            }
2905        }
2906    }
2907
2908    fn resize(&mut self, new_len: usize, value: u8) {
2909        let len = self.len();
2910        if new_len > len {
2911            let additional = new_len - len;
2912            self.reserve(additional);
2913            unsafe {
2914                let dst = self.as_raw()[len..].as_mut_ptr();
2915                ptr::write_bytes(dst, value, additional);
2916                self.set_len(new_len);
2917            }
2918        } else {
2919            self.truncate(new_len);
2920        }
2921    }
2922
2923    #[inline]
2924    fn reserve(&mut self, additional: usize) {
2925        let len = self.len();
2926        let rem = self.capacity() - len;
2927
2928        if additional <= rem {
2929            // The handle can already store at least `additional` more bytes, so
2930            // there is no further work needed to be done.
2931            return;
2932        }
2933
2934        self.reserve_inner(additional)
2935    }
2936
2937    #[inline]
2938    // In separate function to allow the short-circuits in `reserve` to
2939    // be inline-able. This significantly helps performance.
2940    fn reserve_inner(&mut self, additional: usize) {
2941        let len = self.len();
2942
2943        // Reserving involves abandoning the currently shared buffer and
2944        // allocating a new vector with the requested capacity.
2945        let new_cap = len + additional;
2946
2947        unsafe {
2948            let inner = self.as_inner();
2949            let vec_cap = inner.cap - SHARED_VEC_SIZE;
2950
2951            // try to reclaim the buffer. This is possible if the current
2952            // handle is the only outstanding handle pointing to the buffer.
2953            if inner.is_unique() && vec_cap >= new_cap {
2954                let offset = inner.offset;
2955                inner.offset = SHARED_VEC_SIZE as u32;
2956
2957                // The capacity is sufficient, reclaim the buffer
2958                let src = (self.0.as_ptr() as *mut u8).add(offset as usize);
2959                let dst = (self.0.as_ptr() as *mut u8).add(SHARED_VEC_SIZE);
2960                ptr::copy(src, dst, len);
2961            } else {
2962                // Create a new vector storage
2963                let pool = inner.pool;
2964                *self = InnerVec::from_slice(new_cap, self.as_ref(), pool);
2965            }
2966        }
2967    }
2968
2969    unsafe fn set_start(&mut self, start: u32) {
2970        // Setting the start to 0 is a no-op, so return early if this is the
2971        // case.
2972        if start == 0 {
2973            return;
2974        }
2975
2976        let inner = self.as_inner();
2977        assert!(start <= inner.cap as u32);
2978
2979        // Updating the start of the view is setting `offset` to point to the
2980        // new start and updating the `len` field to reflect the new length
2981        // of the view.
2982        inner.offset += start;
2983
2984        if inner.len >= start {
2985            inner.len -= start;
2986        } else {
2987            inner.len = 0;
2988        }
2989    }
2990}
2991
2992impl Drop for InnerVec {
2993    fn drop(&mut self) {
2994        release_shared_vec(self.0.as_ptr());
2995    }
2996}
2997
2998/*
2999 *
3000 * ===== Inner =====
3001 *
3002 */
3003
3004impl Inner {
3005    #[inline]
3006    const fn from_static(bytes: &'static [u8]) -> Inner {
3007        let ptr = bytes.as_ptr() as *mut u8;
3008
3009        Inner {
3010            // `arc` won't ever store a pointer. Instead, use it to
3011            // track the fact that the `Bytes` handle is backed by a
3012            // static buffer.
3013            arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
3014            ptr,
3015            len: bytes.len(),
3016            cap: bytes.len(),
3017        }
3018    }
3019
3020    #[inline]
3021    const fn empty_inline() -> Inner {
3022        Inner {
3023            arc: unsafe { NonNull::new_unchecked(KIND_INLINE as *mut Shared) },
3024            ptr: 0 as *mut u8,
3025            len: 0,
3026            cap: 0,
3027        }
3028    }
3029
3030    #[inline]
3031    fn from_vec(mut vec: Vec<u8>, pool: PoolRef) -> Inner {
3032        let len = vec.len();
3033        let cap = vec.capacity();
3034        let ptr = vec.as_mut_ptr();
3035        pool.acquire(cap);
3036
3037        // Store data in arc
3038        let shared = Box::into_raw(Box::new(Shared {
3039            vec,
3040            pool,
3041            ref_count: AtomicUsize::new(1),
3042        }));
3043
3044        // The pointer should be aligned, so this assert should always succeed.
3045        debug_assert!(0 == (shared as usize & KIND_MASK));
3046
3047        // Create new arc, so atomic operations can be avoided.
3048        Inner {
3049            ptr,
3050            len,
3051            cap,
3052            arc: unsafe { NonNull::new_unchecked(shared) },
3053        }
3054    }
3055
3056    #[inline]
3057    fn with_capacity(capacity: usize, pool: PoolRef) -> Inner {
3058        Inner::from_slice(capacity, &[], pool)
3059    }
3060
3061    #[inline]
3062    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> Inner {
3063        // vec must be aligned to SharedVec instead of u8
3064        let mut vec_cap = (cap / SHARED_VEC_SIZE) + 1;
3065        if cap % SHARED_VEC_SIZE != 0 {
3066            vec_cap += 1;
3067        }
3068        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
3069
3070        // Store data in vec
3071        let len = src.len();
3072        let full_cap = vec.capacity() * SHARED_VEC_SIZE;
3073        let cap = full_cap - SHARED_VEC_SIZE;
3074        vec.push(SharedVec {
3075            pool,
3076            cap: full_cap,
3077            ref_count: AtomicUsize::new(1),
3078            len: 0,
3079            offset: 0,
3080        });
3081        pool.acquire(full_cap);
3082
3083        let shared_ptr = vec.as_mut_ptr();
3084        mem::forget(vec);
3085
3086        let (ptr, arc) = unsafe {
3087            let ptr = shared_ptr.add(1) as *mut u8;
3088            ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
3089            let arc =
3090                NonNull::new_unchecked((shared_ptr as usize ^ KIND_VEC) as *mut Shared);
3091            (ptr, arc)
3092        };
3093
3094        // Create new arc, so atomic operations can be avoided.
3095        Inner { len, cap, ptr, arc }
3096    }
3097
3098    #[inline]
3099    fn from_slice_inline(src: &[u8]) -> Inner {
3100        unsafe { Inner::from_ptr_inline(src.as_ptr(), src.len()) }
3101    }
3102
3103    #[inline]
3104    unsafe fn from_ptr_inline(src: *const u8, len: usize) -> Inner {
3105        let mut inner = Inner {
3106            arc: NonNull::new_unchecked(KIND_INLINE as *mut Shared),
3107            ptr: ptr::null_mut(),
3108            len: 0,
3109            cap: 0,
3110        };
3111
3112        let dst = inner.inline_ptr();
3113        ptr::copy(src, dst, len);
3114        inner.set_inline_len(len);
3115        inner
3116    }
3117
3118    #[inline]
3119    fn pool(&self) -> PoolRef {
3120        let kind = self.kind();
3121
3122        if kind == KIND_VEC {
3123            unsafe { (*self.shared_vec()).pool }
3124        } else if kind == KIND_ARC {
3125            unsafe { (*self.arc.as_ptr()).pool }
3126        } else {
3127            PoolId::DEFAULT.pool_ref()
3128        }
3129    }
3130
3131    #[cfg(feature = "mpool")]
3132    #[inline]
3133    fn move_to_pool(&mut self, pool: PoolRef) {
3134        let kind = self.kind();
3135
3136        if kind == KIND_VEC {
3137            let vec = self.shared_vec();
3138            unsafe {
3139                let cap = (*vec).cap;
3140                pool.acquire(cap);
3141                let pool = mem::replace(&mut (*vec).pool, pool);
3142                pool.release(cap);
3143            }
3144        } else if kind == KIND_ARC {
3145            let arc = self.arc.as_ptr();
3146            unsafe {
3147                let cap = (*arc).vec.capacity();
3148                pool.acquire(cap);
3149                let pool = mem::replace(&mut (*arc).pool, pool);
3150                pool.release(cap);
3151            }
3152        }
3153    }
3154
3155    /// Return a slice for the handle's view into the shared buffer
3156    #[inline]
3157    fn as_ref(&self) -> &[u8] {
3158        unsafe {
3159            if self.is_inline() {
3160                slice::from_raw_parts(self.inline_ptr_ro(), self.inline_len())
3161            } else {
3162                slice::from_raw_parts(self.ptr, self.len)
3163            }
3164        }
3165    }
3166
3167    /// Return a mutable slice for the handle's view into the shared buffer
3168    #[inline]
3169    fn as_mut(&mut self) -> &mut [u8] {
3170        debug_assert!(!self.is_static());
3171
3172        unsafe {
3173            if self.is_inline() {
3174                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
3175            } else {
3176                slice::from_raw_parts_mut(self.ptr, self.len)
3177            }
3178        }
3179    }
3180
3181    /// Return a mutable slice for the handle's view into the shared buffer
3182    /// including potentially uninitialized bytes.
3183    #[inline]
3184    unsafe fn as_raw(&mut self) -> &mut [u8] {
3185        debug_assert!(!self.is_static());
3186
3187        if self.is_inline() {
3188            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
3189        } else {
3190            slice::from_raw_parts_mut(self.ptr, self.cap)
3191        }
3192    }
3193
3194    /// Return a raw pointer to data
3195    #[inline]
3196    unsafe fn as_ptr(&mut self) -> *mut u8 {
3197        if self.is_inline() {
3198            self.inline_ptr()
3199        } else {
3200            self.ptr
3201        }
3202    }
3203
3204    /// Insert a byte into the next slot and advance the len by 1.
3205    #[inline]
3206    fn put_u8(&mut self, n: u8) {
3207        if self.is_inline() {
3208            let len = self.inline_len();
3209            assert!(len < INLINE_CAP);
3210            unsafe {
3211                *self.inline_ptr().add(len) = n;
3212            }
3213            self.set_inline_len(len + 1);
3214        } else {
3215            assert!(self.len < self.cap);
3216            unsafe {
3217                *self.ptr.add(self.len) = n;
3218            }
3219            self.len += 1;
3220        }
3221    }
3222
3223    #[inline]
3224    fn len(&self) -> usize {
3225        if self.is_inline() {
3226            self.inline_len()
3227        } else {
3228            self.len
3229        }
3230    }
3231
3232    /// Pointer to the start of the inline buffer
3233    #[inline]
3234    unsafe fn inline_ptr(&mut self) -> *mut u8 {
3235        (self as *mut Inner as *mut u8).offset(INLINE_DATA_OFFSET)
3236    }
3237
3238    /// Pointer to the start of the inline buffer
3239    #[inline]
3240    unsafe fn inline_ptr_ro(&self) -> *const u8 {
3241        (self as *const Inner as *const u8).offset(INLINE_DATA_OFFSET)
3242    }
3243
3244    #[inline]
3245    fn inline_len(&self) -> usize {
3246        // This is undefined behavior due to a data race, but experimental
3247        // evidence shows that it works in practice (discussion:
3248        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3249        (self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
3250    }
3251
3252    /// Set the length of the inline buffer. This is done by writing to the
3253    /// least significant byte of the `arc` field.
3254    #[inline]
3255    fn set_inline_len(&mut self, len: usize) {
3256        debug_assert!(len <= INLINE_CAP);
3257        self.arc = unsafe {
3258            NonNull::new_unchecked(
3259                ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
3260                    | (len << INLINE_LEN_OFFSET)) as _,
3261            )
3262        };
3263    }
3264
3265    /// Sets the length of the handle's view into the shared slice.
3266    #[inline]
3267    unsafe fn set_len(&mut self, len: usize) {
3268        if self.is_inline() {
3269            assert!(len <= INLINE_CAP);
3270            self.set_inline_len(len);
3271        } else {
3272            assert!(len <= self.cap);
3273            self.len = len;
3274        }
3275    }
3276
3277    #[inline]
3278    fn is_empty(&self) -> bool {
3279        self.len() == 0
3280    }
3281
3282    #[inline]
3283    fn capacity(&self) -> usize {
3284        if self.is_inline() {
3285            INLINE_CAP
3286        } else {
3287            self.cap
3288        }
3289    }
3290
3291    fn split_off(&mut self, at: usize, create_inline: bool) -> Inner {
3292        let other = unsafe {
3293            if create_inline && self.len() - at <= INLINE_CAP {
3294                Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at)
3295            } else {
3296                let mut other = self.shallow_clone();
3297                other.set_start(at);
3298                other
3299            }
3300        };
3301        unsafe {
3302            if create_inline && at <= INLINE_CAP {
3303                *self = Inner::from_ptr_inline(self.as_ptr(), at);
3304            } else {
3305                self.set_end(at);
3306            }
3307        }
3308
3309        other
3310    }
3311
3312    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
3313        let other = unsafe {
3314            if create_inline && at <= INLINE_CAP {
3315                Inner::from_ptr_inline(self.as_ptr(), at)
3316            } else {
3317                let mut other = self.shallow_clone();
3318                other.set_end(at);
3319                other
3320            }
3321        };
3322        unsafe {
3323            if create_inline && self.len() - at <= INLINE_CAP {
3324                *self = Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at);
3325            } else {
3326                self.set_start(at);
3327            }
3328        }
3329
3330        other
3331    }
3332
3333    fn truncate(&mut self, len: usize, create_inline: bool) {
3334        unsafe {
3335            if len <= self.len() {
3336                if create_inline && len < INLINE_CAP {
3337                    *self = Inner::from_ptr_inline(self.as_ptr(), len);
3338                } else {
3339                    self.set_len(len);
3340                }
3341            }
3342        }
3343    }
3344
3345    fn resize(&mut self, new_len: usize, value: u8) {
3346        let len = self.len();
3347        if new_len > len {
3348            let additional = new_len - len;
3349            self.reserve(additional);
3350            unsafe {
3351                let dst = self.as_raw()[len..].as_mut_ptr();
3352                ptr::write_bytes(dst, value, additional);
3353                self.set_len(new_len);
3354            }
3355        } else {
3356            self.truncate(new_len, false);
3357        }
3358    }
3359
3360    unsafe fn set_start(&mut self, start: usize) {
3361        // Setting the start to 0 is a no-op, so return early if this is the
3362        // case.
3363        if start == 0 {
3364            return;
3365        }
3366
3367        let kind = self.kind();
3368
3369        // Always check `inline` first, because if the handle is using inline
3370        // data storage, all of the `Inner` struct fields will be gibberish.
3371        if kind == KIND_INLINE {
3372            assert!(start <= INLINE_CAP);
3373
3374            let len = self.inline_len();
3375            if len <= start {
3376                self.set_inline_len(0);
3377            } else {
3378                // `set_start` is essentially shifting data off the front of the
3379                // view. Inlined buffers only track the length of the slice.
3380                // So, to update the start, the data at the new starting point
3381                // is copied to the beginning of the buffer.
3382                let new_len = len - start;
3383
3384                let dst = self.inline_ptr();
3385                let src = (dst as *const u8).add(start);
3386
3387                ptr::copy(src, dst, new_len);
3388
3389                self.set_inline_len(new_len);
3390            }
3391        } else {
3392            assert!(start <= self.cap);
3393
3394            // Updating the start of the view is setting `ptr` to point to the
3395            // new start and updating the `len` field to reflect the new length
3396            // of the view.
3397            self.ptr = self.ptr.add(start);
3398
3399            if self.len >= start {
3400                self.len -= start;
3401            } else {
3402                self.len = 0;
3403            }
3404
3405            self.cap -= start;
3406        }
3407    }
3408
3409    unsafe fn set_end(&mut self, end: usize) {
3410        // Always check `inline` first, because if the handle is using inline
3411        // data storage, all of the `Inner` struct fields will be gibberish.
3412        if self.is_inline() {
3413            assert!(end <= INLINE_CAP);
3414            let new_len = cmp::min(self.inline_len(), end);
3415            self.set_inline_len(new_len);
3416        } else {
3417            assert!(end <= self.cap);
3418
3419            self.cap = end;
3420            self.len = cmp::min(self.len, end);
3421        }
3422    }
3423
3424    /// Checks if it is safe to mutate the memory
3425    fn is_mut_safe(&self) -> bool {
3426        let kind = self.kind();
3427
3428        // Always check `inline` first, because if the handle is using inline
3429        // data storage, all of the `Inner` struct fields will be gibberish.
3430        if kind == KIND_INLINE {
3431            // Inlined buffers can always be mutated as the data is never shared
3432            // across handles.
3433            true
3434        } else if kind == KIND_STATIC {
3435            false
3436        } else if kind == KIND_VEC {
3437            // Otherwise, the underlying buffer is potentially shared with other
3438            // handles, so the ref_count needs to be checked.
3439            unsafe { (*self.shared_vec()).is_unique() }
3440        } else {
3441            // Otherwise, the underlying buffer is potentially shared with other
3442            // handles, so the ref_count needs to be checked.
3443            unsafe { (*self.arc.as_ptr()).is_unique() }
3444        }
3445    }
3446
3447    /// Increments the ref count. This should only be done if it is known that
3448    /// it can be done safely. As such, this fn is not public, instead other
3449    /// fns will use this one while maintaining the guarantees.
3450    /// Parameter `mut_self` should only be set to `true` if caller holds
3451    /// `&mut self` reference.
3452    ///
3453    /// "Safely" is defined as not exposing two `BytesMut` values that point to
3454    /// the same byte window.
3455    ///
3456    /// This function is thread safe.
3457    unsafe fn shallow_clone(&self) -> Inner {
3458        // Always check `inline` first, because if the handle is using inline
3459        // data storage, all of the `Inner` struct fields will be gibberish.
3460        //
3461        // Additionally, if kind is STATIC, then Arc is *never* changed, making
3462        // it safe and faster to check for it now before an atomic acquire.
3463
3464        if self.is_inline_or_static() {
3465            // In this case, a shallow_clone still involves copying the data.
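            // (inline data lives inside the handle itself and static data is
            // immutable, so a bitwise copy of `Inner` is a valid clone)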
3466            let mut inner: mem::MaybeUninit<Inner> = mem::MaybeUninit::uninit();
3467            ptr::copy_nonoverlapping(self, inner.as_mut_ptr(), 1);
3468            inner.assume_init()
3469        } else {
3470            self.shallow_clone_sync()
3471        }
3472    }
3473
3474    #[cold]
3475    unsafe fn shallow_clone_sync(&self) -> Inner {
3476        // The function requires `&self`, which means that `shallow_clone`
3477        // could be called concurrently.
3478        //
3479        // The first step is to read the value of `arc`. This will determine
3480        // how to proceed: the kind tag stored in the pointer's low bits tells
3481        // us whether the handle is backed by a `Shared` (KIND_ARC) or a
3482        // `SharedVec` (KIND_VEC), and therefore which reference count needs
3483        // to be incremented.
3484        let arc: *mut Shared = self.arc.as_ptr();
3485        let kind = arc as usize & KIND_MASK;
3486
3487        if kind == KIND_ARC {
3488            let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
3489            if old_size == usize::MAX {
3490                abort();
3491            }
3492
3493            Inner {
3494                arc: NonNull::new_unchecked(arc),
3495                ..*self
3496            }
3497        } else {
3498            assert!(kind == KIND_VEC);
3499
3500            let vec_arc = (arc as usize & KIND_UNMASK) as *mut SharedVec;
3501            let old_size = (*vec_arc).ref_count.fetch_add(1, Relaxed);
3502            if old_size == usize::MAX {
3503                abort();
3504            }
3505
3506            Inner {
3507                arc: NonNull::new_unchecked(arc),
3508                ..*self
3509            }
3510        }
3511    }
3512
3513    #[inline]
3514    fn reserve(&mut self, additional: usize) {
3515        let len = self.len();
3516        let rem = self.capacity() - len;
3517
3518        if additional <= rem {
3519            // The handle can already store at least `additional` more bytes, so
3520            // there is no further work needed to be done.
3521            return;
3522        }
3523
3524        self.reserve_inner(additional)
3525    }
3526
3527    #[inline]
3528    // In separate function to allow the short-circuits in `reserve` to
3529    // be inline-able. This significantly helps performance.
3530    fn reserve_inner(&mut self, additional: usize) {
3531        let len = self.len();
3532        let kind = self.kind();
3533
3534        // Always check `inline` first, because if the handle is using inline
3535        // data storage, all of the `Inner` struct fields will be gibberish.
3536        if kind == KIND_INLINE {
3537            let new_cap = len + additional;
3538
3539            // Promote to a vector
3540            *self = Inner::from_slice(new_cap, self.as_ref(), PoolId::DEFAULT.pool_ref());
3541            return;
3542        }
3543
3544        // If the shared buffer cannot be reclaimed below, reserving means
3545        // allocating a new vector with the requested capacity.
3546        let new_cap = len + additional;
3547
3548        if kind == KIND_VEC {
3549            let vec = self.shared_vec();
3550
3551            unsafe {
3552                let vec_cap = (*vec).cap - SHARED_VEC_SIZE;
3553
3554                // First, try to reclaim the buffer. This is possible if the current
3555                // handle is the only outstanding handle pointing to the buffer.
3556                if (*vec).is_unique() && vec_cap >= new_cap {
3557                    // The capacity is sufficient, reclaim the buffer
3558                    let ptr = (vec as *mut u8).add(SHARED_VEC_SIZE);
3559                    ptr::copy(self.ptr, ptr, len);
3560
3561                    self.ptr = ptr;
3562                    self.cap = vec_cap;
3563                } else {
3564                    // Create a new vector storage
3565                    *self = Inner::from_slice(new_cap, self.as_ref(), (*vec).pool);
3566                }
3567            }
3568        } else {
3569            debug_assert!(kind == KIND_ARC);
3570
3571            let arc = self.arc.as_ptr();
3572            unsafe {
3573                // First, try to reclaim the buffer. This is possible if the current
3574                // handle is the only outstanding handle pointing to the buffer.
3575                if (*arc).is_unique() {
3576                    // This is the only handle to the buffer. It can be reclaimed.
3577                    // However, before doing the work of copying data, check to make
3578                    // sure that the vector has enough capacity.
3579                    let v = &mut (*arc).vec;
3580
3581                    if v.capacity() >= new_cap {
3582                        // The capacity is sufficient, reclaim the buffer
3583                        let ptr = v.as_mut_ptr();
3584
3585                        ptr::copy(self.ptr, ptr, len);
3586
3587                        self.ptr = ptr;
3588                        self.cap = v.capacity();
3589                        return;
3590                    }
3591                }
3592
3593                // Create a new vector storage
3594                *self = Inner::from_slice(new_cap, self.as_ref(), (*arc).pool);
3595            }
3596        }
3597    }
3598
3599    /// Returns true if the buffer is stored inline
3600    #[inline]
3601    fn is_inline(&self) -> bool {
3602        self.kind() == KIND_INLINE
3603    }
3604
3605    #[inline]
3606    fn is_inline_or_static(&self) -> bool {
3607        // The value returned by `kind` is read without synchronization, but
3608        // callers may use it to decide which operations to take, including
3609        // unsafe ones performed without synchronization.
3610        //
3611        // KIND_INLINE and KIND_STATIC will *never* change, so branching on
3612        // that information is safe.
3613        let kind = self.kind();
3614        kind == KIND_INLINE || kind == KIND_STATIC
3615    }
3616
3617    /// Used for `debug_assert` statements
3618    #[inline]
3619    fn is_static(&self) -> bool {
3620        matches!(self.kind(), KIND_STATIC)
3621    }
3622
3623    #[inline]
3624    fn shared_vec(&self) -> *mut SharedVec {
3625        ((self.arc.as_ptr() as usize) & KIND_UNMASK) as *mut SharedVec
3626    }
3627
3628    #[inline]
3629    fn kind(&self) -> usize {
3630        // This function is probably going to raise some eyebrows. It returns
3631        // the buffer's storage kind, encoded via pointer tagging in the least
3632        // significant bits of the `arc` field (see the sketch after this impl).
3633        //
3634        // Now, you may notice that `arc` is an `AtomicPtr` and this is
3635        // accessing it as a normal field without performing an atomic load...
3636        //
3637        // Again, the function only cares about the least significant bits, and
3638        // those bits are set when `Inner` is created and never changed after that.
3639        // All platforms have atomic "word" operations and won't randomly flip
3640        // bits, so even without any explicit atomic operations, reading the
3641        // flag will be correct.
3642        //
3643        // This is undefined behavior due to a data race, but experimental
3644        // evidence shows that it works in practice (discussion:
3645        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3646        //
3647        // This function is performance critical, as it is called for
3648        // every operation. Performing an atomic load would mess with the
3649        // compiler's ability to optimize. Simple benchmarks show up to a 10%
3650        // slowdown using a `Relaxed` atomic load on x86.
3651
3652        #[cfg(target_endian = "little")]
3653        #[inline]
3654        fn imp(arc: *mut Shared) -> usize {
3655            (arc as usize) & KIND_MASK
3656        }
3657
3658        #[cfg(target_endian = "big")]
3659        #[inline]
3660        fn imp(arc: *mut Shared) -> usize {
3661            unsafe {
3662                let p: *const usize = arc as *const usize;
3663                *p & KIND_MASK
3664            }
3665        }
3666
3667        imp(self.arc.as_ptr())
3668    }
3669}
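// The `kind` check above relies on pointer tagging: allocations for `Shared`
// and `SharedVec` are aligned to at least a machine word, so the low bits of
// the pointer are always zero and can carry a small tag. The test below is a
// minimal, self-contained sketch of that technique; the tag values and the
// `Aligned` type are illustrative only, not the ones used by this module.
#[cfg(test)]
mod pointer_tagging_sketch {
    #[test]
    fn tag_in_low_bits() {
        // Any type with alignment >= 4 leaves the two low pointer bits free.
        #[repr(align(8))]
        struct Aligned(u64);

        let ptr = Box::into_raw(Box::new(Aligned(42)));
        assert_eq!(ptr as usize & 0b11, 0);

        // Store a 2-bit tag in the low bits, then mask it off to recover
        // the original pointer.
        let tag = 0b10usize;
        let tagged = (ptr as usize) | tag;
        assert_eq!(tagged & 0b11, tag);

        let untagged = (tagged & !0b11) as *mut Aligned;
        assert_eq!(untagged, ptr);

        // Re-box to free the allocation.
        unsafe { drop(Box::from_raw(untagged)) };
    }
}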
3670
3671impl Drop for Inner {
3672    fn drop(&mut self) {
3673        let kind = self.kind();
3674
3675        if kind == KIND_VEC {
3676            release_shared_vec(self.shared_vec());
3677        } else if kind == KIND_ARC {
3678            release_shared(self.arc.as_ptr());
3679        }
3680    }
3681}
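// Taken together, `shallow_clone` and `Drop` implement the shared-buffer
// lifecycle: cloning a heap-backed `Bytes` only bumps a reference count, and
// the storage is released once the last handle is dropped. A minimal usage
// sketch, relying only on the public `Bytes` API defined in this module:
#[cfg(test)]
mod shallow_clone_sketch {
    use super::Bytes;

    #[test]
    fn clone_shares_storage() {
        // Large enough to avoid inline storage, so the clone is shallow.
        let a = Bytes::from(vec![7u8; 256]);
        let b = a.clone();

        // Both handles view the same underlying memory.
        assert_eq!(a, b);
        assert_eq!(a.as_ptr(), b.as_ptr());

        // Dropping one handle leaves the other fully usable; the buffer is
        // only freed when the last handle goes away.
        drop(a);
        assert_eq!(b.len(), 256);
    }
}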
3682
3683fn release_shared(ptr: *mut Shared) {
3684    // `Shared` storage... follow the drop steps from Arc.
3685    unsafe {
3686        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3687            return;
3688        }
3689
3690        // This fence is needed to prevent reordering of use of the data and
3691        // deletion of the data.  Because it is marked `Release`, the decreasing
3692        // of the reference count synchronizes with this `Acquire` fence. This
3693        // means that use of the data happens before decreasing the reference
3694        // count, which happens before this fence, which happens before the
3695        // deletion of the data.
3696        //
3697        // As explained in the [Boost documentation][1],
3698        //
3699        // > It is important to enforce any possible access to the object in one
3700        // > thread (through an existing reference) to *happen before* deleting
3701        // > the object in a different thread. This is achieved by a "release"
3702        // > operation after dropping a reference (any access to the object
3703        // > through this reference must obviously happened before), and an
3704        // > "acquire" operation before deleting the object.
3705        //
3706        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3707        atomic::fence(Acquire);
3708
3709        // Drop the data
3710        let arc = Box::from_raw(ptr);
3711        arc.pool.release(arc.vec.capacity());
3712    }
3713}
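// The release pattern above (a `Release` decrement followed by an `Acquire`
// fence before freeing) is the same one used by `std::sync::Arc`. The sketch
// below shows the bare pattern on a plain `AtomicUsize`; it is illustrative
// only and does not touch the `Shared` types in this module.
#[cfg(test)]
mod release_pattern_sketch {
    use std::sync::atomic::{fence, AtomicUsize, Ordering};

    fn release(count: &AtomicUsize) -> bool {
        // Drop one reference. `Release` makes all prior uses of the data
        // visible to whichever thread performs the final decrement.
        if count.fetch_sub(1, Ordering::Release) != 1 {
            return false;
        }
        // Pair with the releases above before the caller frees the data.
        fence(Ordering::Acquire);
        true
    }

    #[test]
    fn last_release_wins() {
        let count = AtomicUsize::new(2);
        assert!(!release(&count)); // first drop: not the last handle
        assert!(release(&count)); // second drop: safe to free now
    }
}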
3714
3715fn release_shared_vec(ptr: *mut SharedVec) {
3716    // `SharedVec` storage... follow the drop steps from Arc.
3717    unsafe {
3718        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3719            return;
3720        }
3721
3722        // This fence is needed to prevent reordering of use of the data and
3723        // deletion of the data.  Because it is marked `Release`, the decreasing
3724        // of the reference count synchronizes with this `Acquire` fence. This
3725        // means that use of the data happens before decreasing the reference
3726        // count, which happens before this fence, which happens before the
3727        // deletion of the data.
3728        //
3729        // As explained in the [Boost documentation][1],
3730        //
3731        // > It is important to enforce any possible access to the object in one
3732        // > thread (through an existing reference) to *happen before* deleting
3733        // > the object in a different thread. This is achieved by a "release"
3734        // > operation after dropping a reference (any access to the object
3735        // > through this reference must obviously happened before), and an
3736        // > "acquire" operation before deleting the object.
3737        //
3738        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3739        atomic::fence(Acquire);
3740
3741        // Drop the data
3742        let cap = (*ptr).cap;
3743        (*ptr).pool.release(cap);
3744        ptr::drop_in_place(ptr);
3745        Vec::<u8>::from_raw_parts(ptr as *mut u8, 0, cap);
3746    }
3747}
3748
3749impl Shared {
3750    fn is_unique(&self) -> bool {
3751        // The goal is to check if the current handle is the only handle
3752        // that currently has access to the buffer. This is done by
3753        // checking if the `ref_count` is currently 1.
3754        //
3755        // The `Acquire` ordering synchronizes with the `Release` as
3756        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
3757        // operation guarantees that any mutations done in other threads
3758        // are ordered before the `ref_count` is decremented. As such,
3759        // this `Acquire` will guarantee that those mutations are
3760        // visible to the current thread.
3761        self.ref_count.load(Acquire) == 1
3762    }
3763}
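// `is_unique` is what lets `reserve_inner` above reclaim the existing buffer
// instead of allocating a new one: in-place reuse is only sound when this
// handle is the sole owner. The same idea is exposed by `std::sync::Arc`,
// shown here as a small illustrative sketch (not this module's code):
#[cfg(test)]
mod uniqueness_sketch {
    use std::sync::Arc;

    #[test]
    fn mutate_in_place_only_when_unique() {
        let mut data = Arc::new(vec![1u8, 2, 3]);

        // Unique owner: `get_mut` grants mutable access, no copy needed.
        assert!(Arc::get_mut(&mut data).is_some());

        // A second handle exists: in-place mutation is no longer allowed, so
        // a caller would have to fall back to copying (as `reserve_inner`
        // does when the buffer is shared).
        let other = Arc::clone(&data);
        assert!(Arc::get_mut(&mut data).is_none());
        drop(other);

        // Sole owner again.
        assert!(Arc::get_mut(&mut data).is_some());
    }
}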
3764
3765impl SharedVec {
3766    fn is_unique(&self) -> bool {
3767        // This is the same as Shared::is_unique(), but for KIND_VEC storage.
3768        self.ref_count.load(Acquire) == 1
3769    }
3770}
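// From the public API, the reclaim-or-reallocate logic in `reserve_inner` is
// reached through `BytesMut::reserve`. A small usage sketch; only the
// documented contract `capacity() - len() >= additional` is asserted, since
// the exact capacity chosen is an implementation detail:
#[cfg(test)]
mod reserve_sketch {
    use super::BytesMut;

    #[test]
    fn reserve_guarantees_capacity() {
        let mut buf = BytesMut::from(&b"hello world"[..]);

        buf.reserve(128);
        assert!(buf.capacity() - buf.len() >= 128);

        // Contents are preserved whether the buffer was reclaimed in place
        // or reallocated.
        assert_eq!(buf, &b"hello world"[..]);
    }
}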
3771
3772unsafe impl Send for Inner {}
3773unsafe impl Sync for Inner {}
3774
3775/*
3776 *
3777 * ===== PartialEq / PartialOrd =====
3778 *
3779 */
3780
3781impl PartialEq<[u8]> for BytesMut {
3782    fn eq(&self, other: &[u8]) -> bool {
3783        &**self == other
3784    }
3785}
3786
3787impl<const N: usize> PartialEq<[u8; N]> for BytesMut {
3788    fn eq(&self, other: &[u8; N]) -> bool {
3789        &**self == other
3790    }
3791}
3792
3793impl PartialEq<BytesMut> for [u8] {
3794    fn eq(&self, other: &BytesMut) -> bool {
3795        *other == *self
3796    }
3797}
3798
3799impl<const N: usize> PartialEq<BytesMut> for [u8; N] {
3800    fn eq(&self, other: &BytesMut) -> bool {
3801        *other == *self
3802    }
3803}
3804
3805impl<const N: usize> PartialEq<BytesMut> for &[u8; N] {
3806    fn eq(&self, other: &BytesMut) -> bool {
3807        *other == *self
3808    }
3809}
3810
3811impl PartialEq<str> for BytesMut {
3812    fn eq(&self, other: &str) -> bool {
3813        &**self == other.as_bytes()
3814    }
3815}
3816
3817impl PartialEq<BytesMut> for str {
3818    fn eq(&self, other: &BytesMut) -> bool {
3819        *other == *self
3820    }
3821}
3822
3823impl PartialEq<Vec<u8>> for BytesMut {
3824    fn eq(&self, other: &Vec<u8>) -> bool {
3825        *self == other[..]
3826    }
3827}
3828
3829impl PartialEq<BytesMut> for Vec<u8> {
3830    fn eq(&self, other: &BytesMut) -> bool {
3831        *other == *self
3832    }
3833}
3834
3835impl PartialEq<String> for BytesMut {
3836    fn eq(&self, other: &String) -> bool {
3837        *self == other[..]
3838    }
3839}
3840
3841impl PartialEq<BytesMut> for String {
3842    fn eq(&self, other: &BytesMut) -> bool {
3843        *other == *self
3844    }
3845}
3846
3847impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
3848where
3849    BytesMut: PartialEq<T>,
3850{
3851    fn eq(&self, other: &&'a T) -> bool {
3852        *self == **other
3853    }
3854}
3855
3856impl PartialEq<BytesMut> for &[u8] {
3857    fn eq(&self, other: &BytesMut) -> bool {
3858        *other == *self
3859    }
3860}
3861
3862impl PartialEq<BytesMut> for &str {
3863    fn eq(&self, other: &BytesMut) -> bool {
3864        *other == *self
3865    }
3866}
3867
3868impl PartialEq<[u8]> for Bytes {
3869    fn eq(&self, other: &[u8]) -> bool {
3870        self.inner.as_ref() == other
3871    }
3872}
3873
3874impl<const N: usize> PartialEq<[u8; N]> for Bytes {
3875    fn eq(&self, other: &[u8; N]) -> bool {
3876        self.inner.as_ref() == other.as_ref()
3877    }
3878}
3879
3880impl PartialOrd<[u8]> for Bytes {
3881    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
3882        self.inner.as_ref().partial_cmp(other)
3883    }
3884}
3885
3886impl<const N: usize> PartialOrd<[u8; N]> for Bytes {
3887    fn partial_cmp(&self, other: &[u8; N]) -> Option<cmp::Ordering> {
3888        self.inner.as_ref().partial_cmp(other.as_ref())
3889    }
3890}
3891
3892impl PartialEq<Bytes> for [u8] {
3893    fn eq(&self, other: &Bytes) -> bool {
3894        *other == *self
3895    }
3896}
3897
3898impl<const N: usize> PartialEq<Bytes> for [u8; N] {
3899    fn eq(&self, other: &Bytes) -> bool {
3900        *other == *self
3901    }
3902}
3903
3904impl<const N: usize> PartialEq<Bytes> for &[u8; N] {
3905    fn eq(&self, other: &Bytes) -> bool {
3906        *other == *self
3907    }
3908}
3909
3910impl PartialOrd<Bytes> for [u8] {
3911    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3912        other.partial_cmp(self)
3913    }
3914}
3915
3916impl<const N: usize> PartialOrd<Bytes> for [u8; N] {
3917    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3918        other.partial_cmp(self)
3919    }
3920}
3921
3922impl PartialEq<str> for Bytes {
3923    fn eq(&self, other: &str) -> bool {
3924        self.inner.as_ref() == other.as_bytes()
3925    }
3926}
3927
3928impl PartialOrd<str> for Bytes {
3929    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
3930        self.inner.as_ref().partial_cmp(other.as_bytes())
3931    }
3932}
3933
3934impl PartialEq<Bytes> for str {
3935    fn eq(&self, other: &Bytes) -> bool {
3936        *other == *self
3937    }
3938}
3939
3940impl PartialOrd<Bytes> for str {
3941    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3942        other.partial_cmp(self)
3943    }
3944}
3945
3946impl PartialEq<Vec<u8>> for Bytes {
3947    fn eq(&self, other: &Vec<u8>) -> bool {
3948        *self == other[..]
3949    }
3950}
3951
3952impl PartialOrd<Vec<u8>> for Bytes {
3953    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
3954        self.inner.as_ref().partial_cmp(&other[..])
3955    }
3956}
3957
3958impl PartialEq<Bytes> for Vec<u8> {
3959    fn eq(&self, other: &Bytes) -> bool {
3960        *other == *self
3961    }
3962}
3963
3964impl PartialOrd<Bytes> for Vec<u8> {
3965    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3966        other.partial_cmp(self)
3967    }
3968}
3969
3970impl PartialEq<String> for Bytes {
3971    fn eq(&self, other: &String) -> bool {
3972        *self == other[..]
3973    }
3974}
3975
3976impl PartialOrd<String> for Bytes {
3977    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
3978        self.inner.as_ref().partial_cmp(other.as_bytes())
3979    }
3980}
3981
3982impl PartialEq<Bytes> for String {
3983    fn eq(&self, other: &Bytes) -> bool {
3984        *other == *self
3985    }
3986}
3987
3988impl PartialOrd<Bytes> for String {
3989    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3990        other.partial_cmp(self)
3991    }
3992}
3993
3994impl PartialEq<Bytes> for &[u8] {
3995    fn eq(&self, other: &Bytes) -> bool {
3996        *other == *self
3997    }
3998}
3999
4000impl PartialOrd<Bytes> for &[u8] {
4001    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
4002        other.partial_cmp(self)
4003    }
4004}
4005
4006impl PartialEq<Bytes> for &str {
4007    fn eq(&self, other: &Bytes) -> bool {
4008        *other == *self
4009    }
4010}
4011
4012impl PartialOrd<Bytes> for &str {
4013    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
4014        other.partial_cmp(self)
4015    }
4016}
4017
4018impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
4019where
4020    Bytes: PartialEq<T>,
4021{
4022    fn eq(&self, other: &&'a T) -> bool {
4023        *self == **other
4024    }
4025}
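// The forwarding impls above allow `Bytes` to be compared directly against
// slices, arrays, strings, and references to them, in either direction. A
// brief usage sketch relying only on the impls defined in this file:
#[cfg(test)]
mod bytes_eq_sketch {
    use super::Bytes;

    #[test]
    fn cross_type_comparisons() {
        let b = Bytes::from(&b"hello"[..]);

        // `Bytes` can appear on either side of the comparison.
        assert_eq!(b, "hello");
        assert_eq!("hello", b);
        assert_eq!(b, b"hello".as_ref());
        assert_eq!(b, vec![b'h', b'e', b'l', b'l', b'o']);
        assert_eq!(b, &b"hello"[..]);

        // Ordering against string and slice types also works.
        assert!(b > "hell");
        assert!(b < &b"help"[..]);
    }
}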
4026
4027impl From<BytesVec> for Bytes {
4028    fn from(b: BytesVec) -> Self {
4029        b.freeze()
4030    }
4031}
4032
4033impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
4034where
4035    Bytes: PartialOrd<T>,
4036{
4037    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
4038        self.partial_cmp(&**other)
4039    }
4040}
4041
4042impl PartialEq<BytesMut> for Bytes {
4043    fn eq(&self, other: &BytesMut) -> bool {
4044        other[..] == self[..]
4045    }
4046}
4047
4048impl PartialEq<BytesVec> for Bytes {
4049    fn eq(&self, other: &BytesVec) -> bool {
4050        other[..] == self[..]
4051    }
4052}
4053
4054impl PartialEq<Bytes> for BytesVec {
4055    fn eq(&self, other: &Bytes) -> bool {
4056        other[..] == self[..]
4057    }
4058}
4059
4060impl PartialEq<Bytes> for BytesMut {
4061    fn eq(&self, other: &Bytes) -> bool {
4062        other[..] == self[..]
4063    }
4064}
4065
4066impl PartialEq<BytesMut> for BytesVec {
4067    fn eq(&self, other: &BytesMut) -> bool {
4068        other[..] == self[..]
4069    }
4070}
4071
4072impl PartialEq<BytesVec> for BytesMut {
4073    fn eq(&self, other: &BytesVec) -> bool {
4074        other[..] == self[..]
4075    }
4076}
4077
4078impl PartialEq<[u8]> for BytesVec {
4079    fn eq(&self, other: &[u8]) -> bool {
4080        &**self == other
4081    }
4082}
4083
4084impl<const N: usize> PartialEq<[u8; N]> for BytesVec {
4085    fn eq(&self, other: &[u8; N]) -> bool {
4086        &**self == other
4087    }
4088}
4089
4090impl PartialEq<BytesVec> for [u8] {
4091    fn eq(&self, other: &BytesVec) -> bool {
4092        *other == *self
4093    }
4094}
4095
4096impl<const N: usize> PartialEq<BytesVec> for [u8; N] {
4097    fn eq(&self, other: &BytesVec) -> bool {
4098        *other == *self
4099    }
4100}
4101
4102impl<const N: usize> PartialEq<BytesVec> for &[u8; N] {
4103    fn eq(&self, other: &BytesVec) -> bool {
4104        *other == *self
4105    }
4106}
4107
4108impl PartialEq<str> for BytesVec {
4109    fn eq(&self, other: &str) -> bool {
4110        &**self == other.as_bytes()
4111    }
4112}
4113
4114impl PartialEq<BytesVec> for str {
4115    fn eq(&self, other: &BytesVec) -> bool {
4116        *other == *self
4117    }
4118}
4119
4120impl PartialEq<Vec<u8>> for BytesVec {
4121    fn eq(&self, other: &Vec<u8>) -> bool {
4122        *self == other[..]
4123    }
4124}
4125
4126impl PartialEq<BytesVec> for Vec<u8> {
4127    fn eq(&self, other: &BytesVec) -> bool {
4128        *other == *self
4129    }
4130}
4131
4132impl PartialEq<String> for BytesVec {
4133    fn eq(&self, other: &String) -> bool {
4134        *self == other[..]
4135    }
4136}
4137
4138impl PartialEq<BytesVec> for String {
4139    fn eq(&self, other: &BytesVec) -> bool {
4140        *other == *self
4141    }
4142}
4143
4144impl<'a, T: ?Sized> PartialEq<&'a T> for BytesVec
4145where
4146    BytesVec: PartialEq<T>,
4147{
4148    fn eq(&self, other: &&'a T) -> bool {
4149        *self == **other
4150    }
4151}
4152
4153impl PartialEq<BytesVec> for &[u8] {
4154    fn eq(&self, other: &BytesVec) -> bool {
4155        *other == *self
4156    }
4157}
4158
4159impl PartialEq<BytesVec> for &str {
4160    fn eq(&self, other: &BytesVec) -> bool {
4161        *other == *self
4162    }
4163}
4164
4165// While there is `std::process::abort`, it's only available in Rust 1.17, and
4166// our minimum supported version is currently 1.15. So, this acts as an abort
4167// by triggering a double panic, which always aborts in Rust.
4168struct Abort;
4169
4170impl Drop for Abort {
4171    fn drop(&mut self) {
4172        panic!();
4173    }
4174}
4175
4176#[inline(never)]
4177#[cold]
4178fn abort() {
4179    let _a = Abort;
4180    panic!();
4181}
4182
4183#[cfg(test)]
4184mod tests {
4185    use std::collections::HashMap;
4186
4187    use super::*;
4188
4189    const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4190        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4191        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb";
4192
4193    #[test]
4194    fn trimdown() {
4195        let mut b = Bytes::from(LONG.to_vec());
4196        assert_eq!(b.inner.capacity(), 263);
4197        unsafe { b.inner.set_len(68) };
4198        assert_eq!(b.len(), 68);
4199        assert_eq!(b.inner.capacity(), 263);
4200        b.trimdown();
4201        assert_eq!(b.inner.capacity(), 96);
4202
4203        unsafe { b.inner.set_len(16) };
4204        b.trimdown();
4205        assert!(b.is_inline());
4206    }
4207
4208    #[test]
4209    #[allow(
4210        clippy::len_zero,
4211        clippy::nonminimal_bool,
4212        clippy::unnecessary_fallible_conversions
4213    )]
4214    fn bytes() {
4215        let mut b = Bytes::from(LONG.to_vec());
4216        b.clear();
4217        assert!(b.is_inline());
4218        assert!(b.is_empty());
4219        assert!(b.len() == 0);
4220
4221        let b = Bytes::from(&Bytes::from(LONG));
4222        assert_eq!(b, LONG);
4223
4224        let b = Bytes::from(BytesMut::from(LONG));
4225        assert_eq!(b, LONG);
4226
4227        let mut b: Bytes = BytesMut::try_from(b).unwrap().freeze();
4228        assert_eq!(b, LONG);
4229        assert!(!(b > b));
4230        assert_eq!(<Bytes as Buf>::remaining(&b), LONG.len());
4231        assert_eq!(<Bytes as Buf>::chunk(&b), LONG);
4232        <Bytes as Buf>::advance(&mut b, 10);
4233        assert_eq!(Buf::chunk(&b), &LONG[10..]);
4234
4235        let mut h: HashMap<Bytes, usize> = HashMap::default();
4236        h.insert(b.clone(), 1);
4237        assert_eq!(h.get(&b), Some(&1));
4238
4239        let mut b = BytesMut::try_from(LONG).unwrap();
4240        assert_eq!(b, LONG);
4241        assert_eq!(<BytesMut as Buf>::remaining(&b), LONG.len());
4242        assert_eq!(<BytesMut as BufMut>::remaining_mut(&b), 25);
4243        assert_eq!(<BytesMut as Buf>::chunk(&b), LONG);
4244        <BytesMut as Buf>::advance(&mut b, 10);
4245        assert_eq!(<BytesMut as Buf>::chunk(&b), &LONG[10..]);
4246
4247        let mut b = BytesMut::with_capacity(12);
4248        <BytesMut as BufMut>::put_i8(&mut b, 1);
4249        assert_eq!(b, b"\x01".as_ref());
4250        <BytesMut as BufMut>::put_u8(&mut b, 2);
4251        assert_eq!(b, b"\x01\x02".as_ref());
4252        <BytesMut as BufMut>::put_slice(&mut b, b"12345");
4253        assert_eq!(b, b"\x01\x0212345".as_ref());
4254        <BytesMut as BufMut>::chunk_mut(&mut b).write_byte(0, b'1');
4255        unsafe { <BytesMut as BufMut>::advance_mut(&mut b, 1) };
4256        assert_eq!(b, b"\x01\x02123451".as_ref());
4257    }
4258
4259    #[test]
4260    #[allow(clippy::unnecessary_fallible_conversions)]
4261    fn bytes_vec() {
4262        let bv = BytesVec::copy_from_slice(LONG);
4263        // SharedVec size is 32
4264        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>() * 9);
4265        assert_eq!(bv.len(), 263);
4266        assert_eq!(bv.as_ref().len(), 263);
4267        assert_eq!(bv.as_ref(), LONG);
4268
4269        let mut bv = BytesVec::copy_from_slice(&b"hello"[..]);
4270        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>());
4271        assert_eq!(bv.len(), 5);
4272        assert_eq!(bv.as_ref().len(), 5);
4273        assert_eq!(bv.as_ref()[0], b"h"[0]);
4274        bv.put_u8(b" "[0]);
4275        assert_eq!(bv.as_ref(), &b"hello "[..]);
4276        bv.put("world");
4277        assert_eq!(bv, "hello world");
4278
4279        let b = Bytes::from(bv);
4280        assert_eq!(b, "hello world");
4281
4282        let mut b = BytesMut::try_from(b).unwrap();
4283        b.put(".");
4284        assert_eq!(b, "hello world.");
4285    }
4286}