// ntex_bytes/bytes.rs

1use std::borrow::{Borrow, BorrowMut};
2use std::ops::{Deref, DerefMut, RangeBounds};
3use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
4use std::sync::atomic::{self, AtomicUsize};
5use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice};
6
7use crate::pool::{PoolId, PoolRef};
8use crate::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut};
9
10/// A reference counted contiguous slice of memory.
11///
12/// `Bytes` is an efficient container for storing and operating on contiguous
13/// slices of memory. It is intended for use primarily in networking code, but
14/// could have applications elsewhere as well.
15///
16/// `Bytes` values facilitate zero-copy network programming by allowing multiple
17/// `Bytes` objects to point to the same underlying memory. This is managed by
18/// using a reference count to track when the memory is no longer needed and can
19/// be freed.
20///
21/// ```
22/// use ntex_bytes::Bytes;
23///
24/// let mut mem = Bytes::from(&b"Hello world"[..]);
25/// let a = mem.slice(0..5);
26///
27/// assert_eq!(a, b"Hello");
28///
29/// let b = mem.split_to(6);
30///
31/// assert_eq!(mem, b"world");
32/// assert_eq!(b, b"Hello ");
33/// ```
34///
35/// # Memory layout
36///
/// The `Bytes` struct itself is fairly small, limited to 4 pointer-sized
/// fields used to track information about which segment of the underlying
/// memory the `Bytes` handle has access to.
40///
41/// The memory layout looks like this:
42///
43/// ```text
44/// +-------+
45/// | Bytes |
46/// +-------+
47///  /      \_____
48/// |              \
49/// v               v
50/// +-----+------------------------------------+
51/// | Arc |         |      Data     |          |
52/// +-----+------------------------------------+
53/// ```
54///
55/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
56/// slice and a pointer to the start of the region visible by the handle.
57/// `Bytes` also tracks the length of its view into the memory.
58///
59/// # Sharing
60///
61/// The memory itself is reference counted, and multiple `Bytes` objects may
/// point to the same region. Each `Bytes` handle points to a different section
/// within the memory region, and handles may or may not have overlapping views
/// into the memory.
65///
66///
67/// ```text
68///
69///    Arc ptrs                   +---------+
70///    ________________________ / | Bytes 2 |
71///   /                           +---------+
72///  /          +-----------+     |         |
73/// |_________/ |  Bytes 1  |     |         |
74/// |           +-----------+     |         |
75/// |           |           | ___/ data     | tail
76/// |      data |      tail |/              |
77/// v           v           v               v
78/// +-----+---------------------------------+-----+
79/// | Arc |     |           |               |     |
80/// +-----+---------------------------------+-----+
81/// ```
82///
83/// # Mutating
84///
85/// While `Bytes` handles may potentially represent overlapping views of the
86/// underlying memory slice and may not be mutated, `BytesMut` handles are
87/// guaranteed to be the only handle able to view that slice of memory. As such,
88/// `BytesMut` handles are able to mutate the underlying memory. Note that
89/// holding a unique view to a region of memory does not mean that there are no
90/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
91/// memory.
92///
93/// # Inline bytes
94///
/// As an optimization, when the slice referenced by a `Bytes` handle is small
/// enough [^1], the data is stored inline in the handle itself and no heap
/// allocation is needed. In this case, a clone is no longer "shallow" and the
/// data will be copied. Converting from a small `Vec` may also produce an
/// inlined `Bytes`. `BytesMut` does not support data inlining and always
/// allocates, but during conversion to `Bytes` data from `BytesMut` could be
/// inlined.
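///
/// For illustration, a small, copied buffer ends up stored inline (a sketch
/// using the `is_inline` method provided by `Bytes`):
///
/// ```
/// use ntex_bytes::Bytes;
///
/// let small = Bytes::copy_from_slice(b"hello");
/// assert!(small.is_inline());
/// ```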
100///
101/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
102///
103pub struct Bytes {
104    inner: Inner,
105}
106
107/// A unique reference to a contiguous slice of memory.
108///
109/// `BytesMut` represents a unique view into a potentially shared memory region.
110/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
112/// allocations.
113///
114/// For more detail, see [Bytes](struct.Bytes.html).
115///
116/// # Growth
117///
118/// One key difference from `Vec<u8>` is that most operations **do not
119/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
120/// world");` could panic if `my_bytes` does not have enough capacity. Before
121/// writing to the buffer, ensure that there is enough remaining capacity by
122/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
123/// is preferable.
124///
125/// The only exception is `extend` which implicitly reserves required capacity.
126///
127/// # Examples
128///
129/// ```
130/// use ntex_bytes::{BytesMut, BufMut};
131///
132/// let mut buf = BytesMut::with_capacity(64);
133///
134/// buf.put_u8(b'h');
135/// buf.put_u8(b'e');
136/// buf.put("llo");
137///
138/// assert_eq!(buf, b"hello");
139///
140/// // Freeze the buffer so that it can be shared
141/// let a = buf.freeze();
142///
143/// // This does not allocate, instead `b` points to the same memory.
144/// let b = a.clone();
145///
146/// assert_eq!(a, b"hello");
147/// assert_eq!(b, b"hello");
148/// ```
149pub struct BytesMut {
150    inner: Inner,
151}
152
153/// A unique reference to a contiguous slice of memory.
154///
155/// `BytesVec` represents a unique view into a potentially shared memory region.
156/// Given the uniqueness guarantee, owners of `BytesVec` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations. Unlike `Bytes`, it never stores data inline and always allocates.
159///
160/// For more detail, see [Bytes](struct.Bytes.html).
161///
162/// # Growth
163///
164/// One key difference from `Vec<u8>` is that most operations **do not
165/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
166/// world");` could panic if `my_bytes` does not have enough capacity. Before
167/// writing to the buffer, ensure that there is enough remaining capacity by
168/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
169/// is preferable.
170///
171/// The only exception is `extend` which implicitly reserves required capacity.
172///
173/// # Examples
174///
175/// ```
176/// use ntex_bytes::{BytesVec, BufMut};
177///
178/// let mut buf = BytesVec::with_capacity(64);
179///
180/// buf.put_u8(b'h');
181/// buf.put_u8(b'e');
182/// buf.put("llo");
183///
184/// assert_eq!(&buf[..], b"hello");
185///
186/// // Freeze the buffer so that it can be shared
187/// let a = buf.freeze();
188///
189/// // This does not allocate, instead `b` points to the same memory.
190/// let b = a.clone();
191///
192/// assert_eq!(a, b"hello");
193/// assert_eq!(b, b"hello");
194/// ```
195pub struct BytesVec {
196    inner: InnerVec,
197}
198
199// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
200// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
201// that mutate the underlying buffer are only performed when the data range
202// being mutated is only available via a single `BytesMut` handle.
203//
204// # Data storage modes
205//
206// The goal of `bytes` is to be as efficient as possible across a wide range of
207// potential usage patterns. As such, `bytes` needs to be able to handle buffers
208// that are never shared, shared on a single thread, and shared across many
209// threads. `bytes` also needs to handle both tiny buffers as well as very large
210// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
// been known to be in the hundreds of megabytes, and HTTP header values can be a
212// few characters in size.
213//
214// To achieve high performance in these various situations, `Bytes` and
215// `BytesMut` use different strategies for storing the buffer depending on the
216// usage pattern.
217//
218// ## Delayed `Arc` allocation
219//
220// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
221// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
222// not used and the buffer is backed by a `Vec<u8>` directly. Using an
223// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
224// shared, that allocation is avoided.
225//
226// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
227// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
228// into an `Arc` and both the original handle and the new handle use the same
229// buffer via the `Arc`.
230//
231// * `Arc` is being used to signify an atomically reference counted cell. We
232// don't use the `Arc` implementation provided by `std` and instead use our own.
233// This ends up simplifying a number of the `unsafe` code snippets.
234//
235// ## Inlining small buffers
236//
237// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
238// systems, this ends up being 32 bytes, which is actually a lot of storage for
239// cases where `Bytes` is being used to represent small byte strings, such as
240// HTTP header names and values.
241//
242// To avoid any allocation at all in these cases, `Bytes` will use the struct
243// itself for storing the buffer, reserving 1 byte for meta data. This means
244// that, on 64 bit systems, 31 byte buffers require no allocation at all.
245//
// The byte used for metadata stores a 2-bit flag used to indicate that the
// buffer is stored inline, as well as 6 bits for tracking the buffer length (the
248// return value of `Bytes::len`).
249//
250// ## Static buffers
251//
252// `Bytes` can also represent a static buffer, which is created with
253// `Bytes::from_static`. No copying or allocations are required for tracking
254// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
255// tracking that the `Bytes` instance represents a static buffer is stored in
256// the `Bytes` struct.
257//
258// # Struct layout
259//
260// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
261// data fields as well as all of the function implementations.
262//
263// The `Inner` struct is carefully laid out in order to support the
264// functionality described above as well as being as small as possible. Size is
265// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
266// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
267// map structure.
268//
269// The `Inner` struct contains the following fields:
270//
271// * `ptr: *mut u8`
272// * `len: usize`
273// * `cap: usize`
274// * `arc: *mut Shared`
275//
276// ## `ptr: *mut u8`
277//
278// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
279// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
280// may have been shifted to point somewhere inside the buffer.
281//
282// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
283//
284// ## `len: usize`
285//
286// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
287// always the `Vec`'s length. The slice represented by `ptr` and `len` should
288// (ideally) always be initialized memory.
289//
290// When in "inlined" mode, `len` is used as part of the inlined buffer.
291//
292// ## `cap: usize`
293//
294// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
295// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
296// may or may not be initialized memory.
297//
298// When in "inlined" mode, `cap` is used as part of the inlined buffer.
299//
300// ## `arc: *mut Shared`
301//
302// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
303// will be the pointer to the `Arc` structure tracking the ref count for the
304// underlying buffer. When the pointer is null, then the `Arc` has not been
305// allocated yet and `self` is the only outstanding handle for the underlying
306// buffer.
307//
308// The lower two bits of `arc` are used to track the storage mode of `Inner`.
309// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
310// indicates vector storage, not yet promoted to Arc.  Since pointers to
311// allocated structures are aligned, the lower two bits of a pointer will always
312// be 0. This allows disambiguating between a pointer and the two flags.
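//
// As a quick illustration (not the actual accessor code used below), the mode
// and the pointer can be recovered from the `arc` word like this, where
// `arc_word` stands for the `arc` field reinterpreted as a `usize`:
//
//     let kind = arc_word & KIND_MASK;       // KIND_ARC / KIND_INLINE / KIND_STATIC / KIND_VEC
//     let ptr_bits = arc_word & KIND_UNMASK; // pointer bits with the flags cleared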
313//
314// When in "inlined" mode, the least significant byte of `arc` is also used to
315// store the length of the buffer view (vs. the capacity, which is a constant).
316//
317// The rest of `arc`'s bytes are used as part of the inline buffer, which means
318// that those bytes need to be located next to the `ptr`, `len`, and `cap`
319// fields, which make up the rest of the inline buffer. This requires special
320// casing the layout of `Inner` depending on if the target platform is big or
321// little endian.
322//
323// On little endian platforms, the `arc` field must be the first field in the
324// struct. On big endian platforms, the `arc` field must be the last field in
325// the struct. Since a deterministic struct layout is required, `Inner` is
326// annotated with `#[repr(C)]`.
327//
328// # Thread safety
329//
330// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
331// by bumping the buffer ref count and returning a new struct pointing to the
332// same buffer. However, the `Arc` structure is lazily allocated. This means
333// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
334// function can be called concurrently from multiple threads. This is why an
335// `AtomicPtr` is used for the `arc` field vs. a `*const`.
336//
337// Care is taken to ensure that the need for synchronization is minimized. Most
338// operations do not require any synchronization.
339//
340#[cfg(target_endian = "little")]
341#[repr(C)]
342struct Inner {
343    // WARNING: Do not access the fields directly unless you know what you are
344    // doing. Instead, use the fns. See implementation comment above.
345    arc: NonNull<Shared>,
346    ptr: *mut u8,
347    len: usize,
348    cap: usize,
349}
350
351#[cfg(target_endian = "big")]
352#[repr(C)]
353struct Inner {
354    // WARNING: Do not access the fields directly unless you know what you are
355    // doing. Instead, use the fns. See implementation comment above.
356    ptr: *mut u8,
357    len: usize,
358    cap: usize,
359    arc: NonNull<Shared>,
360}
361
// Thread-safe reference-counted container for the shared storage. This is mostly
363// the same as `std::sync::Arc` but without the weak counter. The ref counting
364// fns are based on the ones found in `std`.
365//
366// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
367// up making the overall code simpler and easier to reason about. This is due to
368// some of the logic around setting `Inner::arc` and other ways the `arc` field
369// is used. Using `Arc` ended up requiring a number of funky transmutes and
370// other shenanigans to make it work.
371struct Shared {
372    vec: Vec<u8>,
373    ref_count: AtomicUsize,
374    pool: PoolRef,
375}
376
377struct SharedVec {
378    cap: usize,
379    len: u32,
380    offset: u32,
381    ref_count: AtomicUsize,
382    pool: PoolRef,
383}
384
385// Buffer storage strategy flags.
386const KIND_ARC: usize = 0b00;
387const KIND_INLINE: usize = 0b01;
388const KIND_STATIC: usize = 0b10;
389const KIND_VEC: usize = 0b11;
390const KIND_MASK: usize = 0b11;
391const KIND_UNMASK: usize = !KIND_MASK;
392
393const MIN_NON_ZERO_CAP: usize = 64;
394const SHARED_VEC_SIZE: usize = mem::size_of::<SharedVec>();
395
396// Bit op constants for extracting the inline length value from the `arc` field.
397const INLINE_LEN_MASK: usize = 0b1111_1100;
398const INLINE_LEN_OFFSET: usize = 2;
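//
// For example (illustrative only), the inline length is read as
// `(arc & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET` and written back as
// `(arc & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)`, which leaves the
// kind flag bits untouched.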
399
400// Byte offset from the start of `Inner` to where the inline buffer data
401// starts. On little endian platforms, the first byte of the struct is the
402// storage flag, so the data is shifted by a byte. On big endian systems, the
403// data starts at the beginning of the struct.
404#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 1;
406#[cfg(target_endian = "big")]
407const INLINE_DATA_OFFSET: isize = 0;
408
409// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
410// metadata.
411#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 1;
413#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 1;
415
416/*
417 *
418 * ===== Bytes =====
419 *
420 */
421
422impl Bytes {
423    /// Creates a new empty `Bytes`.
424    ///
425    /// This will not allocate and the returned `Bytes` handle will be empty.
426    ///
427    /// # Examples
428    ///
429    /// ```
430    /// use ntex_bytes::Bytes;
431    ///
432    /// let b = Bytes::new();
433    /// assert_eq!(&b[..], b"");
434    /// ```
435    #[inline]
436    pub const fn new() -> Bytes {
437        Bytes {
438            inner: Inner::empty_inline(),
439        }
440    }
441
442    /// Creates a new `Bytes` from a static slice.
443    ///
444    /// The returned `Bytes` will point directly to the static slice. There is
445    /// no allocating or copying.
446    ///
447    /// # Examples
448    ///
449    /// ```
450    /// use ntex_bytes::Bytes;
451    ///
452    /// let b = Bytes::from_static(b"hello");
453    /// assert_eq!(&b[..], b"hello");
454    /// ```
455    #[inline]
456    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
457        Bytes {
458            inner: Inner::from_static(bytes),
459        }
460    }
461
462    /// Returns the number of bytes contained in this `Bytes`.
463    ///
464    /// # Examples
465    ///
466    /// ```
467    /// use ntex_bytes::Bytes;
468    ///
469    /// let b = Bytes::from(&b"hello"[..]);
470    /// assert_eq!(b.len(), 5);
471    /// ```
472    #[inline]
473    pub fn len(&self) -> usize {
474        self.inner.len()
475    }
476
477    /// Returns true if the `Bytes` has a length of 0.
478    ///
479    /// # Examples
480    ///
481    /// ```
482    /// use ntex_bytes::Bytes;
483    ///
484    /// let b = Bytes::new();
485    /// assert!(b.is_empty());
486    /// ```
487    #[inline]
488    pub fn is_empty(&self) -> bool {
489        self.inner.is_empty()
490    }
491
    /// Returns `true` if the `Bytes` stores its data inline
493    ///
494    /// # Examples
495    /// ```
496    /// use ntex_bytes::{Bytes, BytesMut};
497    ///
498    /// assert!(Bytes::from(BytesMut::from(&[0, 0, 0, 0][..])).is_inline());
499    /// assert!(Bytes::from(Vec::with_capacity(4)).is_inline());
500    /// assert!(!Bytes::from(&[0; 1024][..]).is_inline());
501    /// ```
502    pub fn is_inline(&self) -> bool {
503        self.inner.is_inline()
504    }
505
    /// Creates a `Bytes` instance from a slice by copying it.
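    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(&b"hello"[..]);
    /// assert_eq!(b, b"hello");
    /// ```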
507    pub fn copy_from_slice(data: &[u8]) -> Self {
508        Self::copy_from_slice_in(data, PoolId::DEFAULT)
509    }
510
    /// Creates a `Bytes` instance from a slice by copying it, allocating from the given memory pool.
512    pub fn copy_from_slice_in<T>(data: &[u8], pool: T) -> Self
513    where
514        PoolRef: From<T>,
515    {
516        if data.len() <= INLINE_CAP {
517            Bytes {
518                inner: Inner::from_slice_inline(data),
519            }
520        } else {
521            Bytes {
522                inner: Inner::from_slice(data.len(), data, pool.into()),
523            }
524        }
525    }
526
527    /// Returns a slice of self for the provided range.
528    ///
529    /// This will increment the reference count for the underlying memory and
530    /// return a new `Bytes` handle set to the slice.
531    ///
532    /// This operation is `O(1)`.
533    ///
534    /// # Examples
535    ///
536    /// ```
537    /// use ntex_bytes::Bytes;
538    ///
539    /// let a = Bytes::from(b"hello world");
540    /// let b = a.slice(2..5);
541    ///
542    /// assert_eq!(&b[..], b"llo");
543    /// assert_eq!(&b[..=1], b"ll");
544    /// assert_eq!(&b[1..=1], b"l");
545    /// ```
546    ///
547    /// # Panics
548    ///
549    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
550    /// will panic.
551    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
552        self.slice_checked(range)
553            .expect("Requires that `begin <= end` and `end <= self.len()`")
554    }
555
556    /// Returns a slice of self for the provided range.
557    ///
    /// Returns `None` if `begin > end` or `end > self.len()`.
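    ///
    /// # Examples
    ///
    /// An illustrative sketch of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    ///
    /// assert_eq!(a.slice_checked(2..5).unwrap(), b"llo");
    /// assert!(a.slice_checked(5..2).is_none());
    /// assert!(a.slice_checked(..100).is_none());
    /// ```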
559    pub fn slice_checked(&self, range: impl RangeBounds<usize>) -> Option<Bytes> {
560        use std::ops::Bound;
561
562        let len = self.len();
563
564        let begin = match range.start_bound() {
565            Bound::Included(&n) => n,
566            Bound::Excluded(&n) => n + 1,
567            Bound::Unbounded => 0,
568        };
569
570        let end = match range.end_bound() {
571            Bound::Included(&n) => n + 1,
572            Bound::Excluded(&n) => n,
573            Bound::Unbounded => len,
574        };
575
576        if begin <= end && end <= len {
577            if end - begin <= INLINE_CAP {
578                Some(Bytes {
579                    inner: Inner::from_slice_inline(&self[begin..end]),
580                })
581            } else {
582                let mut ret = self.clone();
583                unsafe {
584                    ret.inner.set_end(end);
585                    ret.inner.set_start(begin);
586                }
587                Some(ret)
588            }
589        } else {
590            None
591        }
592    }
593
594    /// Returns a slice of self that is equivalent to the given `subset`.
595    ///
596    /// When processing a `Bytes` buffer with other tools, one often gets a
597    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
598    /// This function turns that `&[u8]` into another `Bytes`, as if one had
599    /// called `self.slice()` with the offsets that correspond to `subset`.
600    ///
601    /// This operation is `O(1)`.
602    ///
603    /// # Examples
604    ///
605    /// ```
606    /// use ntex_bytes::Bytes;
607    ///
608    /// let bytes = Bytes::from(&b"012345678"[..]);
609    /// let as_slice = bytes.as_ref();
610    /// let subset = &as_slice[2..6];
611    /// let subslice = bytes.slice_ref(&subset);
612    /// assert_eq!(subslice, b"2345");
613    /// ```
614    ///
615    /// # Panics
616    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
619    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
620        self.slice_ref_checked(subset)
621            .expect("Given `sub` slice is not contained within the `Bytes` buffer")
622    }
623
    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// Returns `None` if the given `subset` slice is not contained within the
    /// `Bytes` buffer.
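    ///
    /// # Examples
    ///
    /// An illustrative sketch of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    ///
    /// assert_eq!(bytes.slice_ref_checked(subset).unwrap(), b"2345");
    ///
    /// // a slice that is not part of `bytes` yields `None`
    /// assert!(bytes.slice_ref_checked(b"other").is_none());
    /// ```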
625    pub fn slice_ref_checked(&self, subset: &[u8]) -> Option<Bytes> {
626        let bytes_p = self.as_ptr() as usize;
627        let bytes_len = self.len();
628
629        let sub_p = subset.as_ptr() as usize;
630        let sub_len = subset.len();
631
632        if sub_p >= bytes_p && sub_p + sub_len <= bytes_p + bytes_len {
633            let sub_offset = sub_p - bytes_p;
634            Some(self.slice(sub_offset..(sub_offset + sub_len)))
635        } else {
636            None
637        }
638    }
639
640    /// Splits the bytes into two at the given index.
641    ///
642    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
643    /// contains elements `[at, len)`.
644    ///
645    /// This is an `O(1)` operation that just increases the reference count and
646    /// sets a few indices.
647    ///
648    /// # Examples
649    ///
650    /// ```
651    /// use ntex_bytes::Bytes;
652    ///
653    /// let mut a = Bytes::from(&b"hello world"[..]);
654    /// let b = a.split_off(5);
655    ///
656    /// assert_eq!(a, b"hello");
657    /// assert_eq!(b, b" world");
658    /// ```
659    ///
660    /// # Panics
661    ///
662    /// Panics if `at > self.len()`.
663    pub fn split_off(&mut self, at: usize) -> Bytes {
664        self.split_off_checked(at)
665            .expect("at value must be <= self.len()`")
666    }
667
668    /// Splits the bytes into two at the given index.
669    ///
    /// Returns `None` if `at > self.len()`.
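    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off_checked(5).unwrap();
    ///
    /// assert_eq!(a, b"hello");
    /// assert_eq!(b, b" world");
    ///
    /// // out-of-bounds index yields `None`
    /// assert!(a.split_off_checked(100).is_none());
    /// ```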
671    pub fn split_off_checked(&mut self, at: usize) -> Option<Bytes> {
672        if at <= self.len() {
673            if at == self.len() {
674                Some(Bytes::new())
675            } else if at == 0 {
676                Some(mem::replace(self, Bytes::new()))
677            } else {
678                Some(Bytes {
679                    inner: self.inner.split_off(at, true),
680                })
681            }
682        } else {
683            None
684        }
685    }
686
687    /// Splits the bytes into two at the given index.
688    ///
689    /// Afterwards `self` contains elements `[at, len)`, and the returned
690    /// `Bytes` contains elements `[0, at)`.
691    ///
692    /// This is an `O(1)` operation that just increases the reference count and
693    /// sets a few indices.
694    ///
695    /// # Examples
696    ///
697    /// ```
698    /// use ntex_bytes::Bytes;
699    ///
700    /// let mut a = Bytes::from(&b"hello world"[..]);
701    /// let b = a.split_to(5);
702    ///
703    /// assert_eq!(a, b" world");
704    /// assert_eq!(b, b"hello");
705    /// ```
706    ///
707    /// # Panics
708    ///
709    /// Panics if `at > len`.
710    pub fn split_to(&mut self, at: usize) -> Bytes {
711        self.split_to_checked(at)
712            .expect("at value must be <= self.len()`")
713    }
714
715    /// Splits the bytes into two at the given index.
716    ///
    /// Returns `None` if `at > len`.
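    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to_checked(5).unwrap();
    ///
    /// assert_eq!(a, b" world");
    /// assert_eq!(b, b"hello");
    ///
    /// // out-of-bounds index yields `None`
    /// assert!(a.split_to_checked(100).is_none());
    /// ```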
718    pub fn split_to_checked(&mut self, at: usize) -> Option<Bytes> {
719        if at <= self.len() {
720            if at == self.len() {
721                Some(mem::replace(self, Bytes::new()))
722            } else if at == 0 {
723                Some(Bytes::new())
724            } else {
725                Some(Bytes {
726                    inner: self.inner.split_to(at, true),
727                })
728            }
729        } else {
730            None
731        }
732    }
733
734    /// Shortens the buffer, keeping the first `len` bytes and dropping the
735    /// rest.
736    ///
737    /// If `len` is greater than the buffer's current length, this has no
738    /// effect.
739    ///
740    /// The [`split_off`] method can emulate `truncate`, but this causes the
741    /// excess bytes to be returned instead of dropped.
742    ///
743    /// # Examples
744    ///
745    /// ```
746    /// use ntex_bytes::Bytes;
747    ///
748    /// let mut buf = Bytes::from(&b"hello world"[..]);
749    /// buf.truncate(5);
750    /// assert_eq!(buf, b"hello"[..]);
751    /// ```
752    ///
753    /// [`split_off`]: #method.split_off
754    #[inline]
755    pub fn truncate(&mut self, len: usize) {
756        self.inner.truncate(len, true);
757    }
758
    /// Reduces memory usage by dropping the buffer capacity that is not used by
    /// this `Bytes` object, copying the data if necessary.
    ///
    /// This is useful if the underlying buffer is larger than the current bytes object.
762    ///
763    /// # Examples
764    ///
765    /// ```
766    /// use ntex_bytes::Bytes;
767    ///
768    /// let mut buf = Bytes::from(&b"hello world"[..]);
769    /// buf.trimdown();
770    /// assert_eq!(buf, b"hello world"[..]);
771    /// ```
772    #[inline]
773    pub fn trimdown(&mut self) {
774        let kind = self.inner.kind();
775
        // trim down only if the buffer is not inline or static and the
        // buffer's unused capacity is at least 64 bytes
778        if !(kind == KIND_INLINE || kind == KIND_STATIC) {
779            if self.inner.len() <= INLINE_CAP {
780                *self = Bytes {
781                    inner: Inner::from_slice_inline(self),
782                };
783            } else if self.inner.capacity() - self.inner.len() >= 64 {
784                *self = Bytes {
785                    inner: Inner::from_slice(self.len(), self, self.inner.pool()),
786                }
787            }
788        }
789    }
790
791    /// Clears the buffer, removing all data.
792    ///
793    /// # Examples
794    ///
795    /// ```
796    /// use ntex_bytes::Bytes;
797    ///
798    /// let mut buf = Bytes::from(&b"hello world"[..]);
799    /// buf.clear();
800    /// assert!(buf.is_empty());
801    /// ```
802    #[inline]
803    pub fn clear(&mut self) {
804        self.inner = Inner::empty_inline();
805    }
806
807    /// Attempts to convert into a `BytesMut` handle.
808    ///
809    /// This will only succeed if there are no other outstanding references to
810    /// the underlying chunk of memory. `Bytes` handles that contain inlined
811    /// bytes will always be convertible to `BytesMut`.
812    ///
813    /// # Examples
814    ///
815    /// ```
816    /// use ntex_bytes::Bytes;
817    ///
818    /// let a = Bytes::copy_from_slice(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
819    ///
820    /// // Create a shallow clone
821    /// let b = a.clone();
822    ///
823    /// // This will fail because `b` shares a reference with `a`
824    /// let a = a.try_mut().unwrap_err();
825    ///
826    /// drop(b);
827    ///
828    /// // This will succeed
829    /// let mut a = a.try_mut().unwrap();
830    ///
831    /// a[0] = b'b';
832    ///
833    /// assert_eq!(&a[..4], b"bary");
834    /// ```
835    pub fn try_mut(self) -> Result<BytesMut, Bytes> {
836        if self.inner.is_mut_safe() {
837            Ok(BytesMut { inner: self.inner })
838        } else {
839            Err(self)
840        }
841    }
842
843    /// Returns an iterator over the bytes contained by the buffer.
844    ///
845    /// # Examples
846    ///
847    /// ```
848    /// use ntex_bytes::{Buf, Bytes};
849    ///
850    /// let buf = Bytes::from(&b"abc"[..]);
851    /// let mut iter = buf.iter();
852    ///
853    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
854    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
855    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
856    /// assert_eq!(iter.next(), None);
857    /// ```
858    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
859        self.chunk().iter()
860    }
861}
862
863impl Buf for Bytes {
864    #[inline]
865    fn remaining(&self) -> usize {
866        self.len()
867    }
868
869    #[inline]
870    fn chunk(&self) -> &[u8] {
871        self.inner.as_ref()
872    }
873
874    #[inline]
875    fn advance(&mut self, cnt: usize) {
876        assert!(
877            cnt <= self.inner.as_ref().len(),
878            "cannot advance past `remaining`"
879        );
880        unsafe {
881            self.inner.set_start(cnt);
882        }
883    }
884}
885
886impl bytes::buf::Buf for Bytes {
887    #[inline]
888    fn remaining(&self) -> usize {
889        self.len()
890    }
891
892    #[inline]
893    fn chunk(&self) -> &[u8] {
894        self.inner.as_ref()
895    }
896
897    #[inline]
898    fn advance(&mut self, cnt: usize) {
899        assert!(
900            cnt <= self.inner.as_ref().len(),
901            "cannot advance past `remaining`"
902        );
903        unsafe {
904            self.inner.set_start(cnt);
905        }
906    }
907}
908
909impl Clone for Bytes {
910    fn clone(&self) -> Bytes {
911        Bytes {
912            inner: unsafe { self.inner.shallow_clone() },
913        }
914    }
915}
916
917impl AsRef<[u8]> for Bytes {
918    #[inline]
919    fn as_ref(&self) -> &[u8] {
920        self.inner.as_ref()
921    }
922}
923
924impl Deref for Bytes {
925    type Target = [u8];
926
927    #[inline]
928    fn deref(&self) -> &[u8] {
929        self.inner.as_ref()
930    }
931}
932
933impl From<&Bytes> for Bytes {
934    fn from(src: &Bytes) -> Bytes {
935        src.clone()
936    }
937}
938
939impl From<BytesMut> for Bytes {
940    fn from(src: BytesMut) -> Bytes {
941        src.freeze()
942    }
943}
944
945impl From<Vec<u8>> for Bytes {
    /// Convert a `Vec` into a `Bytes`
    ///
    /// If the vector is small enough, its contents are copied inline into the
    /// `Bytes` handle; otherwise the `Bytes` is backed by heap storage built
    /// from the vector.
951    fn from(src: Vec<u8>) -> Bytes {
952        if src.is_empty() {
953            Bytes::new()
954        } else if src.len() <= INLINE_CAP {
955            Bytes {
956                inner: Inner::from_slice_inline(&src),
957            }
958        } else {
959            BytesMut::from(src).freeze()
960        }
961    }
962}
963
964impl From<String> for Bytes {
965    fn from(src: String) -> Bytes {
966        if src.is_empty() {
967            Bytes::new()
        } else if src.len() <= INLINE_CAP {
969            Bytes {
970                inner: Inner::from_slice_inline(src.as_bytes()),
971            }
972        } else {
973            BytesMut::from(src).freeze()
974        }
975    }
976}
977
978impl From<&'static [u8]> for Bytes {
979    fn from(src: &'static [u8]) -> Bytes {
980        Bytes::from_static(src)
981    }
982}
983
984impl From<&'static str> for Bytes {
985    fn from(src: &'static str) -> Bytes {
986        Bytes::from_static(src.as_bytes())
987    }
988}
989
990impl<'a, const N: usize> From<&'a [u8; N]> for Bytes {
991    fn from(src: &'a [u8; N]) -> Bytes {
992        Bytes::copy_from_slice(src)
993    }
994}
995
996impl FromIterator<u8> for Bytes {
997    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
998        BytesMut::from_iter(into_iter).freeze()
999    }
1000}
1001
1002impl<'a> FromIterator<&'a u8> for Bytes {
1003    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1004        BytesMut::from_iter(into_iter).freeze()
1005    }
1006}
1007
1008impl Eq for Bytes {}
1009
1010impl PartialEq for Bytes {
1011    fn eq(&self, other: &Bytes) -> bool {
1012        self.inner.as_ref() == other.inner.as_ref()
1013    }
1014}
1015
1016impl PartialOrd for Bytes {
1017    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
1018        Some(self.cmp(other))
1019    }
1020}
1021
1022impl Ord for Bytes {
1023    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
1024        self.inner.as_ref().cmp(other.inner.as_ref())
1025    }
1026}
1027
1028impl Default for Bytes {
1029    #[inline]
1030    fn default() -> Bytes {
1031        Bytes::new()
1032    }
1033}
1034
1035impl fmt::Debug for Bytes {
1036    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1037        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1038    }
1039}
1040
1041impl hash::Hash for Bytes {
1042    fn hash<H>(&self, state: &mut H)
1043    where
1044        H: hash::Hasher,
1045    {
1046        let s: &[u8] = self.as_ref();
1047        s.hash(state);
1048    }
1049}
1050
1051impl Borrow<[u8]> for Bytes {
1052    fn borrow(&self) -> &[u8] {
1053        self.as_ref()
1054    }
1055}
1056
1057impl IntoIterator for Bytes {
1058    type Item = u8;
1059    type IntoIter = IntoIter<Bytes>;
1060
1061    fn into_iter(self) -> Self::IntoIter {
1062        IntoIter::new(self)
1063    }
1064}
1065
1066impl<'a> IntoIterator for &'a Bytes {
1067    type Item = &'a u8;
1068    type IntoIter = std::slice::Iter<'a, u8>;
1069
1070    fn into_iter(self) -> Self::IntoIter {
1071        self.as_ref().iter()
1072    }
1073}
1074
1075/*
1076 *
1077 * ===== BytesMut =====
1078 *
1079 */
1080
1081impl BytesMut {
1082    /// Creates a new `BytesMut` with the specified capacity.
1083    ///
1084    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
1087    ///
1088    /// It is important to note that this function does not specify the length
1089    /// of the returned `BytesMut`, but only the capacity.
1090    ///
1091    /// # Panics
1092    ///
    /// Panics if `capacity` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1095    ///
1096    /// # Examples
1097    ///
1098    /// ```
1099    /// use ntex_bytes::{BytesMut, BufMut};
1100    ///
1101    /// let mut bytes = BytesMut::with_capacity(64);
1102    ///
1103    /// // `bytes` contains no data, even though there is capacity
1104    /// assert_eq!(bytes.len(), 0);
1105    ///
1106    /// bytes.put(&b"hello world"[..]);
1107    ///
1108    /// assert_eq!(&bytes[..], b"hello world");
1109    /// ```
1110    #[inline]
1111    pub fn with_capacity(capacity: usize) -> BytesMut {
1112        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1113    }
1114
    /// Creates a new `BytesMut` with the specified capacity, allocated in the specified memory pool.
1116    ///
1117    /// # Examples
1118    ///
1119    /// ```
1120    /// use ntex_bytes::{BytesMut, BufMut, PoolId};
1121    ///
1122    /// let mut bytes = BytesMut::with_capacity_in(64, PoolId::P1);
1123    ///
1124    /// // `bytes` contains no data, even though there is capacity
1125    /// assert_eq!(bytes.len(), 0);
1126    ///
1127    /// bytes.put(&b"hello world"[..]);
1128    ///
1129    /// assert_eq!(&bytes[..], b"hello world");
1130    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1131    /// ```
1132    #[inline]
1133    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesMut
1134    where
1135        PoolRef: From<T>,
1136    {
1137        BytesMut {
1138            inner: Inner::with_capacity(capacity, pool.into()),
1139        }
1140    }
1141
    /// Creates a new `BytesMut` from a slice by copying it.
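    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::BytesMut;
    ///
    /// let buf = BytesMut::copy_from_slice(b"hello");
    /// assert_eq!(&buf[..], b"hello");
    /// ```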
1143    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
1144        Self::copy_from_slice_in(src, PoolId::DEFAULT)
1145    }
1146
    /// Creates a new `BytesMut` from a slice by copying it, allocating from the given memory pool.
1148    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
1149    where
1150        T: AsRef<[u8]>,
1151        PoolRef: From<U>,
1152    {
1153        let s = src.as_ref();
1154        BytesMut {
1155            inner: Inner::from_slice(s.len(), s, pool.into()),
1156        }
1157    }
1158
1159    #[inline]
1160    /// Convert a `Vec` into a `BytesMut`
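    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::{BytesMut, PoolId};
    ///
    /// let buf = BytesMut::from_vec(vec![1, 2, 3], PoolId::DEFAULT);
    /// assert_eq!(&buf[..], &[1, 2, 3]);
    /// ```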
1161    pub fn from_vec<T>(src: Vec<u8>, pool: T) -> BytesMut
1162    where
1163        PoolRef: From<T>,
1164    {
1165        BytesMut {
1166            inner: Inner::from_vec(src, pool.into()),
1167        }
1168    }
1169
1170    /// Creates a new `BytesMut` with default capacity.
1171    ///
    /// The resulting object has length 0 and a small, unspecified capacity.
1174    ///
1175    /// # Examples
1176    ///
1177    /// ```
1178    /// use ntex_bytes::{BytesMut, BufMut};
1179    ///
1180    /// let mut bytes = BytesMut::new();
1181    ///
1182    /// assert_eq!(0, bytes.len());
1183    ///
1184    /// bytes.reserve(2);
1185    /// bytes.put_slice(b"xy");
1186    ///
1187    /// assert_eq!(&b"xy"[..], &bytes[..]);
1188    /// ```
1189    #[inline]
1190    pub fn new() -> BytesMut {
1191        BytesMut::with_capacity(MIN_NON_ZERO_CAP)
1192    }
1193
1194    /// Returns the number of bytes contained in this `BytesMut`.
1195    ///
1196    /// # Examples
1197    ///
1198    /// ```
1199    /// use ntex_bytes::BytesMut;
1200    ///
1201    /// let b = BytesMut::from(&b"hello"[..]);
1202    /// assert_eq!(b.len(), 5);
1203    /// ```
1204    #[inline]
1205    pub fn len(&self) -> usize {
1206        self.inner.len()
1207    }
1208
1209    /// Returns true if the `BytesMut` has a length of 0.
1210    ///
1211    /// # Examples
1212    ///
1213    /// ```
1214    /// use ntex_bytes::BytesMut;
1215    ///
1216    /// let b = BytesMut::with_capacity(64);
1217    /// assert!(b.is_empty());
1218    /// ```
1219    #[inline]
1220    pub fn is_empty(&self) -> bool {
1221        self.inner.is_empty()
1222    }
1223
1224    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
1225    ///
1226    /// # Examples
1227    ///
1228    /// ```
1229    /// use ntex_bytes::BytesMut;
1230    ///
1231    /// let b = BytesMut::with_capacity(64);
1232    /// assert_eq!(b.capacity(), 64);
1233    /// ```
1234    #[inline]
1235    pub fn capacity(&self) -> usize {
1236        self.inner.capacity()
1237    }
1238
1239    /// Converts `self` into an immutable `Bytes`.
1240    ///
1241    /// The conversion is zero cost and is used to indicate that the slice
1242    /// referenced by the handle will no longer be mutated. Once the conversion
1243    /// is done, the handle can be cloned and shared across threads.
1244    ///
1245    /// # Examples
1246    ///
1247    /// ```
1248    /// use ntex_bytes::{BytesMut, BufMut};
1249    /// use std::thread;
1250    ///
1251    /// let mut b = BytesMut::with_capacity(64);
1252    /// b.put("hello world");
1253    /// let b1 = b.freeze();
1254    /// let b2 = b1.clone();
1255    ///
1256    /// let th = thread::spawn(move || {
1257    ///     assert_eq!(b1, b"hello world");
1258    /// });
1259    ///
1260    /// assert_eq!(b2, b"hello world");
1261    /// th.join().unwrap();
1262    /// ```
1263    #[inline]
1264    pub fn freeze(self) -> Bytes {
1265        if self.inner.len() <= INLINE_CAP {
1266            Bytes {
1267                inner: Inner::from_slice_inline(self.inner.as_ref()),
1268            }
1269        } else {
1270            Bytes { inner: self.inner }
1271        }
1272    }
1273
1274    /// Splits the bytes into two at the given index.
1275    ///
1276    /// Afterwards `self` contains elements `[0, at)`, and the returned
1277    /// `BytesMut` contains elements `[at, capacity)`.
1278    ///
1279    /// This is an `O(1)` operation that just increases the reference count
1280    /// and sets a few indices.
1281    ///
1282    /// # Examples
1283    ///
1284    /// ```
1285    /// use ntex_bytes::BytesMut;
1286    ///
1287    /// let mut a = BytesMut::from(&b"hello world"[..]);
1288    /// let mut b = a.split_off(5);
1289    ///
1290    /// a[0] = b'j';
1291    /// b[0] = b'!';
1292    ///
1293    /// assert_eq!(&a[..], b"jello");
1294    /// assert_eq!(&b[..], b"!world");
1295    /// ```
1296    ///
1297    /// # Panics
1298    ///
1299    /// Panics if `at > capacity`.
1300    pub fn split_off(&mut self, at: usize) -> BytesMut {
1301        BytesMut {
1302            inner: self.inner.split_off(at, false),
1303        }
1304    }
1305
1306    /// Removes the bytes from the current view, returning them in a new
1307    /// `BytesMut` handle.
1308    ///
1309    /// Afterwards, `self` will be empty, but will retain any additional
1310    /// capacity that it had before the operation. This is identical to
1311    /// `self.split_to(self.len())`.
1312    ///
1313    /// This is an `O(1)` operation that just increases the reference count and
1314    /// sets a few indices.
1315    ///
1316    /// # Examples
1317    ///
1318    /// ```
1319    /// use ntex_bytes::{BytesMut, BufMut};
1320    ///
1321    /// let mut buf = BytesMut::with_capacity(1024);
1322    /// buf.put(&b"hello world"[..]);
1323    ///
1324    /// let other = buf.split();
1325    ///
1326    /// assert!(buf.is_empty());
1327    /// assert_eq!(1013, buf.capacity());
1328    ///
1329    /// assert_eq!(other, b"hello world"[..]);
1330    /// ```
1331    pub fn split(&mut self) -> BytesMut {
1332        self.split_to(self.len())
1333    }
1334
1335    /// Splits the buffer into two at the given index.
1336    ///
1337    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
1338    /// contains elements `[0, at)`.
1339    ///
1340    /// This is an `O(1)` operation that just increases the reference count and
1341    /// sets a few indices.
1342    ///
1343    /// # Examples
1344    ///
1345    /// ```
1346    /// use ntex_bytes::BytesMut;
1347    ///
1348    /// let mut a = BytesMut::from(&b"hello world"[..]);
1349    /// let mut b = a.split_to(5);
1350    ///
1351    /// a[0] = b'!';
1352    /// b[0] = b'j';
1353    ///
1354    /// assert_eq!(&a[..], b"!world");
1355    /// assert_eq!(&b[..], b"jello");
1356    /// ```
1357    ///
1358    /// # Panics
1359    ///
1360    /// Panics if `at > len`.
1361    pub fn split_to(&mut self, at: usize) -> BytesMut {
1362        self.split_to_checked(at)
1363            .expect("at value must be <= self.len()`")
1364    }
1365
1366    /// Splits the bytes into two at the given index.
1367    ///
    /// Returns `None` if `at > len`.
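    ///
    /// # Examples
    ///
    /// A short, illustrative example:
    ///
    /// ```
    /// use ntex_bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let b = a.split_to_checked(5).unwrap();
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    ///
    /// // out-of-bounds index yields `None`
    /// assert!(a.split_to_checked(100).is_none());
    /// ```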
1369    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
1370        if at <= self.len() {
1371            Some(BytesMut {
1372                inner: self.inner.split_to(at, false),
1373            })
1374        } else {
1375            None
1376        }
1377    }
1378
1379    /// Shortens the buffer, keeping the first `len` bytes and dropping the
1380    /// rest.
1381    ///
1382    /// If `len` is greater than the buffer's current length, this has no
1383    /// effect.
1384    ///
1385    /// The [`split_off`] method can emulate `truncate`, but this causes the
1386    /// excess bytes to be returned instead of dropped.
1387    ///
1388    /// # Examples
1389    ///
1390    /// ```
1391    /// use ntex_bytes::BytesMut;
1392    ///
1393    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1394    /// buf.truncate(5);
1395    /// assert_eq!(buf, b"hello"[..]);
1396    /// ```
1397    ///
1398    /// [`split_off`]: #method.split_off
1399    pub fn truncate(&mut self, len: usize) {
1400        self.inner.truncate(len, false);
1401    }
1402
1403    /// Clears the buffer, removing all data.
1404    ///
1405    /// # Examples
1406    ///
1407    /// ```
1408    /// use ntex_bytes::BytesMut;
1409    ///
1410    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1411    /// buf.clear();
1412    /// assert!(buf.is_empty());
1413    /// ```
1414    pub fn clear(&mut self) {
1415        self.truncate(0);
1416    }
1417
1418    /// Resizes the buffer so that `len` is equal to `new_len`.
1419    ///
1420    /// If `new_len` is greater than `len`, the buffer is extended by the
1421    /// difference with each additional byte set to `value`. If `new_len` is
1422    /// less than `len`, the buffer is simply truncated.
1423    ///
1424    /// # Panics
1425    ///
    /// Panics if `new_len` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1428    ///
1429    /// # Examples
1430    ///
1431    /// ```
1432    /// use ntex_bytes::BytesMut;
1433    ///
1434    /// let mut buf = BytesMut::new();
1435    ///
1436    /// buf.resize(3, 0x1);
1437    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
1438    ///
1439    /// buf.resize(2, 0x2);
1440    /// assert_eq!(&buf[..], &[0x1, 0x1]);
1441    ///
1442    /// buf.resize(4, 0x3);
1443    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
1444    /// ```
1445    #[inline]
1446    pub fn resize(&mut self, new_len: usize, value: u8) {
1447        self.inner.resize(new_len, value);
1448    }
1449
1450    /// Sets the length of the buffer.
1451    ///
1452    /// This will explicitly set the size of the buffer without actually
1453    /// modifying the data, so it is up to the caller to ensure that the data
1454    /// has been initialized.
1455    ///
1456    /// # Examples
1457    ///
1458    /// ```
1459    /// use ntex_bytes::BytesMut;
1460    ///
1461    /// let mut b = BytesMut::from(&b"hello world"[..]);
1462    ///
1463    /// unsafe {
1464    ///     b.set_len(5);
1465    /// }
1466    ///
1467    /// assert_eq!(&b[..], b"hello");
1468    ///
1469    /// unsafe {
1470    ///     b.set_len(11);
1471    /// }
1472    ///
1473    /// assert_eq!(&b[..], b"hello world");
1474    /// ```
1475    ///
1476    /// # Panics
1477    ///
1478    /// This method will panic if `len` is out of bounds for the underlying
1479    /// slice or if it comes after the `end` of the configured window.
1480    #[inline]
1481    #[allow(clippy::missing_safety_doc)]
1482    pub unsafe fn set_len(&mut self, len: usize) {
1483        self.inner.set_len(len)
1484    }
1485
1486    /// Reserves capacity for at least `additional` more bytes to be inserted
1487    /// into the given `BytesMut`.
1488    ///
1489    /// More than `additional` bytes may be reserved in order to avoid frequent
1490    /// reallocations. A call to `reserve` may result in an allocation.
1491    ///
1492    /// Before allocating new buffer space, the function will attempt to reclaim
1493    /// space in the existing buffer. If the current handle references a small
1494    /// view in the original buffer and all other handles have been dropped,
1495    /// and the requested capacity is less than or equal to the existing
1496    /// buffer's capacity, then the current view will be copied to the front of
1497    /// the buffer and the handle will take ownership of the full buffer.
1498    ///
1499    /// # Panics
1500    ///
    /// Panics if the new capacity does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems), or if it overflows `usize`.
1503    ///
1504    /// # Examples
1505    ///
1506    /// In the following example, a new buffer is allocated.
1507    ///
1508    /// ```
1509    /// use ntex_bytes::BytesMut;
1510    ///
1511    /// let mut buf = BytesMut::from(&b"hello"[..]);
1512    /// buf.reserve(64);
1513    /// assert!(buf.capacity() >= 69);
1514    /// ```
1515    ///
1516    /// In the following example, the existing buffer is reclaimed.
1517    ///
1518    /// ```
1519    /// use ntex_bytes::{BytesMut, BufMut};
1520    ///
1521    /// let mut buf = BytesMut::with_capacity(128);
1522    /// buf.put(&[0; 64][..]);
1523    ///
1524    /// let ptr = buf.as_ptr();
1525    /// let other = buf.split();
1526    ///
1527    /// assert!(buf.is_empty());
1528    /// assert_eq!(buf.capacity(), 64);
1529    ///
1530    /// drop(other);
1531    /// buf.reserve(128);
1532    ///
1533    /// assert_eq!(buf.capacity(), 128);
1534    /// assert_eq!(buf.as_ptr(), ptr);
1535    /// ```
1540    #[inline]
1541    pub fn reserve(&mut self, additional: usize) {
1542        let len = self.len();
1543        let rem = self.capacity() - len;
1544
1545        if additional <= rem {
1546            // The handle can already store at least `additional` more bytes, so
1547            // there is no further work needed to be done.
1548            return;
1549        }
1550
1551        self.inner.reserve_inner(additional);
1552    }
1553
1554    /// Appends given bytes to this object.
1555    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized first,
    /// so unlike the `put_slice` operation, `extend_from_slice` does not panic.
1558    ///
1559    /// # Examples
1560    ///
1561    /// ```
1562    /// use ntex_bytes::BytesMut;
1563    ///
1564    /// let mut buf = BytesMut::with_capacity(0);
1565    /// buf.extend_from_slice(b"aaabbb");
1566    /// buf.extend_from_slice(b"cccddd");
1567    ///
1568    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
1569    /// ```
1570    #[inline]
1571    pub fn extend_from_slice(&mut self, extend: &[u8]) {
1572        self.put_slice(extend);
1573    }
1574
1575    /// Returns an iterator over the bytes contained by the buffer.
1576    ///
1577    /// # Examples
1578    ///
1579    /// ```
1580    /// use ntex_bytes::{Buf, BytesMut};
1581    ///
1582    /// let buf = BytesMut::from(&b"abc"[..]);
1583    /// let mut iter = buf.iter();
1584    ///
1585    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
1586    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
1587    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
1588    /// assert_eq!(iter.next(), None);
1589    /// ```
1590    #[inline]
1591    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
1592        self.chunk().iter()
1593    }
1594
1595    #[cfg(feature = "mpool")]
1596    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
1597        self.inner.move_to_pool(pool);
1598    }
1599}
1600
1601impl Buf for BytesMut {
1602    #[inline]
1603    fn remaining(&self) -> usize {
1604        self.len()
1605    }
1606
1607    #[inline]
1608    fn chunk(&self) -> &[u8] {
1609        self.inner.as_ref()
1610    }
1611
1612    #[inline]
1613    fn advance(&mut self, cnt: usize) {
1614        assert!(
1615            cnt <= self.inner.as_ref().len(),
1616            "cannot advance past `remaining`"
1617        );
1618        unsafe {
1619            self.inner.set_start(cnt);
1620        }
1621    }
1622}
1623
1624impl BufMut for BytesMut {
1625    #[inline]
1626    fn remaining_mut(&self) -> usize {
1627        self.capacity() - self.len()
1628    }
1629
1630    #[inline]
1631    unsafe fn advance_mut(&mut self, cnt: usize) {
1632        let new_len = self.len() + cnt;
1633
1634        // This call will panic if `cnt` is too big
1635        self.inner.set_len(new_len);
1636    }
1637
1638    #[inline]
1639    fn chunk_mut(&mut self) -> &mut UninitSlice {
1640        let len = self.len();
1641
1642        unsafe {
1643            // This will never panic as `len` can never become invalid
1644            let ptr = &mut self.inner.as_raw()[len..];
1645
1646            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
1647        }
1648    }
1649
1650    #[inline]
1651    fn put_slice(&mut self, src: &[u8]) {
1652        let len = src.len();
1653        self.reserve(len);
1654
1655        unsafe {
1656            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
1657            self.advance_mut(len);
1658        }
1659    }
1660
1661    #[inline]
1662    fn put_u8(&mut self, n: u8) {
1663        self.reserve(1);
1664        self.inner.put_u8(n);
1665    }
1666
1667    #[inline]
1668    fn put_i8(&mut self, n: i8) {
1669        self.reserve(1);
1670        self.put_u8(n as u8);
1671    }
1672}
1673
1674impl bytes::buf::Buf for BytesMut {
1675    #[inline]
1676    fn remaining(&self) -> usize {
1677        self.len()
1678    }
1679
1680    #[inline]
1681    fn chunk(&self) -> &[u8] {
1682        self.inner.as_ref()
1683    }
1684
1685    #[inline]
1686    fn advance(&mut self, cnt: usize) {
1687        Buf::advance(self, cnt)
1688    }
1689}
1690
1691unsafe impl bytes::buf::BufMut for BytesMut {
1692    #[inline]
1693    fn remaining_mut(&self) -> usize {
1694        BufMut::remaining_mut(self)
1695    }
1696
1697    #[inline]
1698    unsafe fn advance_mut(&mut self, cnt: usize) {
1699        BufMut::advance_mut(self, cnt)
1700    }
1701
1702    #[inline]
1703    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
1704        let len = self.len();
1705        unsafe {
1706            // This will never panic as `len` can never become invalid
1707            let ptr = &mut self.inner.as_raw()[len..];
1708            bytes::buf::UninitSlice::from_raw_parts_mut(
1709                ptr.as_mut_ptr(),
1710                self.capacity() - len,
1711            )
1712        }
1713    }
1714
1715    #[inline]
1716    fn put_slice(&mut self, src: &[u8]) {
1717        BufMut::put_slice(self, src)
1718    }
1719
1720    #[inline]
1721    fn put_u8(&mut self, n: u8) {
1722        BufMut::put_u8(self, n)
1723    }
1724
1725    #[inline]
1726    fn put_i8(&mut self, n: i8) {
1727        BufMut::put_i8(self, n)
1728    }
1729}
1730
1731impl AsRef<[u8]> for BytesMut {
1732    #[inline]
1733    fn as_ref(&self) -> &[u8] {
1734        self.inner.as_ref()
1735    }
1736}
1737
1738impl AsMut<[u8]> for BytesMut {
1739    #[inline]
1740    fn as_mut(&mut self) -> &mut [u8] {
1741        self.inner.as_mut()
1742    }
1743}
1744
1745impl Deref for BytesMut {
1746    type Target = [u8];
1747
1748    #[inline]
1749    fn deref(&self) -> &[u8] {
1750        self.as_ref()
1751    }
1752}
1753
1754impl DerefMut for BytesMut {
1755    #[inline]
1756    fn deref_mut(&mut self) -> &mut [u8] {
1757        self.inner.as_mut()
1758    }
1759}
1760
1761impl From<Vec<u8>> for BytesMut {
1762    #[inline]
1763    /// Convert a `Vec` into a `BytesMut`
1764    ///
    /// A `BytesMut` constructed this way will always store its data on the heap;
    /// `BytesMut` never stores data inline.
1768    fn from(src: Vec<u8>) -> BytesMut {
1769        BytesMut::from_vec(src, PoolId::DEFAULT.pool_ref())
1770    }
1771}
1772
1773impl From<String> for BytesMut {
1774    #[inline]
1775    fn from(src: String) -> BytesMut {
1776        BytesMut::from_vec(src.into_bytes(), PoolId::DEFAULT.pool_ref())
1777    }
1778}
1779
1780impl<'a> From<&'a [u8]> for BytesMut {
1781    fn from(src: &'a [u8]) -> BytesMut {
1782        if src.is_empty() {
1783            BytesMut::new()
1784        } else {
1785            BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1786        }
1787    }
1788}
1789
1790impl<const N: usize> From<[u8; N]> for BytesMut {
1791    fn from(src: [u8; N]) -> BytesMut {
1792        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1793    }
1794}
1795
1796impl<'a, const N: usize> From<&'a [u8; N]> for BytesMut {
1797    fn from(src: &'a [u8; N]) -> BytesMut {
1798        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1799    }
1800}
1801
1802impl<'a> From<&'a str> for BytesMut {
1803    #[inline]
1804    fn from(src: &'a str) -> BytesMut {
1805        BytesMut::from(src.as_bytes())
1806    }
1807}
1808
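/// Converts a `Bytes` into a `BytesMut`.
///
/// The conversion first attempts `try_mut`, which succeeds without copying
/// when it is safe to take over the buffer; otherwise the data is copied into
/// a fresh `BytesMut` allocated from the same pool.
///
/// A minimal sketch (which path is taken is an internal detail):
///
/// ```
/// use ntex_bytes::{Bytes, BytesMut};
///
/// let b = Bytes::from(&b"hello"[..]);
/// let m = BytesMut::from(b);
/// assert_eq!(&m[..], b"hello");
/// ```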
1809impl From<Bytes> for BytesMut {
1810    #[inline]
1811    fn from(src: Bytes) -> BytesMut {
1812        src.try_mut()
1813            .unwrap_or_else(|src| BytesMut::copy_from_slice_in(&src[..], src.inner.pool()))
1814    }
1815}
1816
1817impl Eq for BytesMut {}
1818
1819impl PartialEq for BytesMut {
1820    #[inline]
1821    fn eq(&self, other: &BytesMut) -> bool {
1822        self.inner.as_ref() == other.inner.as_ref()
1823    }
1824}
1825
1826impl Default for BytesMut {
1827    #[inline]
1828    fn default() -> BytesMut {
1829        BytesMut::new()
1830    }
1831}
1832
1833impl Borrow<[u8]> for BytesMut {
1834    #[inline]
1835    fn borrow(&self) -> &[u8] {
1836        self.as_ref()
1837    }
1838}
1839
1840impl BorrowMut<[u8]> for BytesMut {
1841    #[inline]
1842    fn borrow_mut(&mut self) -> &mut [u8] {
1843        self.as_mut()
1844    }
1845}
1846
1847impl fmt::Debug for BytesMut {
1848    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1849        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1850    }
1851}
1852
1853impl fmt::Write for BytesMut {
1854    #[inline]
1855    fn write_str(&mut self, s: &str) -> fmt::Result {
1856        if self.remaining_mut() >= s.len() {
1857            self.put_slice(s.as_bytes());
1858            Ok(())
1859        } else {
1860            Err(fmt::Error)
1861        }
1862    }
1863
1864    #[inline]
1865    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1866        fmt::write(self, args)
1867    }
1868}
1869
1870impl Clone for BytesMut {
1871    #[inline]
1872    fn clone(&self) -> BytesMut {
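        // Unlike cloning `Bytes`, cloning a `BytesMut` copies the underlying
        // data so that each handle keeps a unique, mutable view.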
1873        BytesMut::from(&self[..])
1874    }
1875}
1876
1877impl IntoIterator for BytesMut {
1878    type Item = u8;
1879    type IntoIter = IntoIter<BytesMut>;
1880
1881    fn into_iter(self) -> Self::IntoIter {
1882        IntoIter::new(self)
1883    }
1884}
1885
1886impl<'a> IntoIterator for &'a BytesMut {
1887    type Item = &'a u8;
1888    type IntoIter = std::slice::Iter<'a, u8>;
1889
1890    fn into_iter(self) -> Self::IntoIter {
1891        self.as_ref().iter()
1892    }
1893}
1894
1895impl FromIterator<u8> for BytesMut {
1896    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1897        let iter = into_iter.into_iter();
1898        let (min, maybe_max) = iter.size_hint();
1899
1900        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
1901        for i in iter {
1902            out.reserve(1);
1903            out.put_u8(i);
1904        }
1905
1906        out
1907    }
1908}
1909
1910impl<'a> FromIterator<&'a u8> for BytesMut {
1911    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1912        into_iter.into_iter().copied().collect::<BytesMut>()
1913    }
1914}
1915
1916impl Extend<u8> for BytesMut {
1917    fn extend<T>(&mut self, iter: T)
1918    where
1919        T: IntoIterator<Item = u8>,
1920    {
1921        let iter = iter.into_iter();
1922
1923        let (lower, _) = iter.size_hint();
1924        self.reserve(lower);
1925
1926        for b in iter {
1927            self.put_u8(b);
1928        }
1929    }
1930}
1931
1932impl<'a> Extend<&'a u8> for BytesMut {
1933    fn extend<T>(&mut self, iter: T)
1934    where
1935        T: IntoIterator<Item = &'a u8>,
1936    {
1937        self.extend(iter.into_iter().copied())
1938    }
1939}
1940
1941/*
1942 *
1943 * ===== BytesVec =====
1944 *
1945 */
1946
1947impl BytesVec {
1948    /// Creates a new `BytesVec` with the specified capacity.
1949    ///
1950    /// The returned `BytesVec` will be able to hold at least `capacity` bytes
1951    /// without reallocating.
1952    ///
1953    /// It is important to note that this function does not specify the length
1954    /// of the returned `BytesVec`, but only the capacity.
1955    ///
1956    /// # Panics
1957    ///
    /// Panics if `capacity` is greater than 60 bits for 64-bit systems
    /// and 28 bits for 32-bit systems
1960    ///
1961    /// # Examples
1962    ///
1963    /// ```
1964    /// use ntex_bytes::{BytesVec, BufMut};
1965    ///
1966    /// let mut bytes = BytesVec::with_capacity(64);
1967    ///
1968    /// // `bytes` contains no data, even though there is capacity
1969    /// assert_eq!(bytes.len(), 0);
1970    ///
1971    /// bytes.put(&b"hello world"[..]);
1972    ///
1973    /// assert_eq!(&bytes[..], b"hello world");
1974    /// ```
1975    #[inline]
1976    pub fn with_capacity(capacity: usize) -> BytesVec {
1977        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1978    }
1979
    /// Creates a new `BytesVec` with the specified capacity in the specified memory pool.
1981    ///
1982    /// # Examples
1983    ///
1984    /// ```
1985    /// use ntex_bytes::{BytesVec, BufMut, PoolId};
1986    ///
1987    /// let mut bytes = BytesVec::with_capacity_in(64, PoolId::P1);
1988    ///
1989    /// // `bytes` contains no data, even though there is capacity
1990    /// assert_eq!(bytes.len(), 0);
1991    ///
1992    /// bytes.put(&b"hello world"[..]);
1993    ///
1994    /// assert_eq!(&bytes[..], b"hello world");
1995    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1996    /// ```
1997    #[inline]
1998    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesVec
1999    where
2000        PoolRef: From<T>,
2001    {
2002        BytesVec {
2003            inner: InnerVec::with_capacity(capacity, pool.into()),
2004        }
2005    }
2006
    /// Creates a new `BytesVec` from a slice, by copying it.
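    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// assert_eq!(&buf[..], b"hello");
    /// ```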
2008    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
2009        Self::copy_from_slice_in(src, PoolId::DEFAULT)
2010    }
2011
    /// Creates a new `BytesVec` by copying a slice into the specified memory pool.
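    ///
    /// # Examples
    ///
    /// A minimal sketch using one of the predefined pools (`PoolId::P1` here,
    /// as in the `with_capacity_in` example):
    ///
    /// ```
    /// use ntex_bytes::{BytesVec, PoolId};
    ///
    /// let buf = BytesVec::copy_from_slice_in(&b"hello"[..], PoolId::P1);
    /// assert_eq!(&buf[..], b"hello");
    /// ```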
2013    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
2014    where
2015        T: AsRef<[u8]>,
2016        PoolRef: From<U>,
2017    {
2018        let s = src.as_ref();
2019        BytesVec {
2020            inner: InnerVec::from_slice(s.len(), s, pool.into()),
2021        }
2022    }
2023
2024    /// Creates a new `BytesVec` with default capacity.
2025    ///
    /// The resulting object has length 0 and a small, unspecified capacity.
    /// Note that a minimal backing buffer is allocated up front.
2028    ///
2029    /// # Examples
2030    ///
2031    /// ```
2032    /// use ntex_bytes::{BytesVec, BufMut};
2033    ///
2034    /// let mut bytes = BytesVec::new();
2035    ///
2036    /// assert_eq!(0, bytes.len());
2037    ///
2038    /// bytes.reserve(2);
2039    /// bytes.put_slice(b"xy");
2040    ///
2041    /// assert_eq!(&b"xy"[..], &bytes[..]);
2042    /// ```
2043    #[inline]
2044    pub fn new() -> BytesVec {
2045        BytesVec::with_capacity(MIN_NON_ZERO_CAP)
2046    }
2047
2048    /// Returns the number of bytes contained in this `BytesVec`.
2049    ///
2050    /// # Examples
2051    ///
2052    /// ```
2053    /// use ntex_bytes::BytesVec;
2054    ///
2055    /// let b = BytesVec::copy_from_slice(&b"hello"[..]);
2056    /// assert_eq!(b.len(), 5);
2057    /// ```
2058    #[inline]
2059    pub fn len(&self) -> usize {
2060        self.inner.len()
2061    }
2062
2063    /// Returns true if the `BytesVec` has a length of 0.
2064    ///
2065    /// # Examples
2066    ///
2067    /// ```
2068    /// use ntex_bytes::BytesVec;
2069    ///
2070    /// let b = BytesVec::with_capacity(64);
2071    /// assert!(b.is_empty());
2072    /// ```
2073    #[inline]
2074    pub fn is_empty(&self) -> bool {
2075        self.inner.len() == 0
2076    }
2077
2078    /// Returns the number of bytes the `BytesVec` can hold without reallocating.
2079    ///
2080    /// # Examples
2081    ///
2082    /// ```
2083    /// use ntex_bytes::BytesVec;
2084    ///
2085    /// let b = BytesVec::with_capacity(64);
2086    /// assert_eq!(b.capacity(), 64);
2087    /// ```
2088    #[inline]
2089    pub fn capacity(&self) -> usize {
2090        self.inner.capacity()
2091    }
2092
2093    /// Converts `self` into an immutable `Bytes`.
2094    ///
2095    /// The conversion is zero cost and is used to indicate that the slice
2096    /// referenced by the handle will no longer be mutated. Once the conversion
2097    /// is done, the handle can be cloned and shared across threads.
2098    ///
2099    /// # Examples
2100    ///
2101    /// ```
2102    /// use ntex_bytes::{BytesVec, BufMut};
2103    /// use std::thread;
2104    ///
2105    /// let mut b = BytesVec::with_capacity(64);
2106    /// b.put("hello world");
2107    /// let b1 = b.freeze();
2108    /// let b2 = b1.clone();
2109    ///
2110    /// let th = thread::spawn(move || {
2111    ///     assert_eq!(b1, b"hello world");
2112    /// });
2113    ///
2114    /// assert_eq!(b2, b"hello world");
2115    /// th.join().unwrap();
2116    /// ```
2117    #[inline]
2118    pub fn freeze(self) -> Bytes {
2119        Bytes {
2120            inner: self.inner.into_inner(),
2121        }
2122    }
2123
    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` instance.
2126    ///
2127    /// Afterwards, `self` will be empty, but will retain any additional
2128    /// capacity that it had before the operation. This is identical to
2129    /// `self.split_to(self.len())`.
2130    ///
2131    /// This is an `O(1)` operation that just increases the reference count and
2132    /// sets a few indices.
2133    ///
2134    /// # Examples
2135    ///
2136    /// ```
2137    /// use ntex_bytes::{BytesVec, BufMut};
2138    ///
2139    /// let mut buf = BytesVec::with_capacity(1024);
2140    /// buf.put(&b"hello world"[..]);
2141    ///
2142    /// let other = buf.split();
2143    ///
2144    /// assert!(buf.is_empty());
2145    /// assert_eq!(1013, buf.capacity());
2146    ///
2147    /// assert_eq!(other, b"hello world"[..]);
2148    /// ```
2149    pub fn split(&mut self) -> BytesMut {
2150        self.split_to(self.len())
2151    }
2152
2153    /// Splits the buffer into two at the given index.
2154    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `BytesMut` contains elements `[0, at)`.
2157    ///
2158    /// This is an `O(1)` operation that just increases the reference count and
2159    /// sets a few indices.
2160    ///
2161    /// # Examples
2162    ///
2163    /// ```
2164    /// use ntex_bytes::BytesVec;
2165    ///
2166    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
2167    /// let mut b = a.split_to(5);
2168    ///
2169    /// a[0] = b'!';
2170    ///
2171    /// assert_eq!(&a[..], b"!world");
2172    /// assert_eq!(&b[..], b"hello");
2173    /// ```
2174    ///
2175    /// # Panics
2176    ///
2177    /// Panics if `at > len`.
2178    pub fn split_to(&mut self, at: usize) -> BytesMut {
2179        self.split_to_checked(at)
            .expect("at value must be <= self.len()")
2181    }
2182
2183    /// Splits the bytes into two at the given index.
2184    ///
    /// Returns `None` if `at > len`.
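    ///
    /// # Examples
    ///
    /// A minimal sketch of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
    ///
    /// let b = a.split_to_checked(5).unwrap();
    /// assert_eq!(&b[..], b"hello");
    /// assert_eq!(&a[..], b" world");
    ///
    /// assert!(a.split_to_checked(100).is_none());
    /// ```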
2186    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
2187        if at <= self.len() {
2188            Some(BytesMut {
2189                inner: self.inner.split_to(at, false),
2190            })
2191        } else {
2192            None
2193        }
2194    }
2195
2196    /// Shortens the buffer, keeping the first `len` bytes and dropping the
2197    /// rest.
2198    ///
2199    /// If `len` is greater than the buffer's current length, this has no
2200    /// effect.
2201    ///
2202    /// The [`split_off`] method can emulate `truncate`, but this causes the
2203    /// excess bytes to be returned instead of dropped.
2204    ///
2205    /// # Examples
2206    ///
2207    /// ```
2208    /// use ntex_bytes::BytesVec;
2209    ///
2210    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2211    /// buf.truncate(5);
2212    /// assert_eq!(buf, b"hello"[..]);
2213    /// ```
2214    ///
2215    /// [`split_off`]: #method.split_off
2216    pub fn truncate(&mut self, len: usize) {
2217        self.inner.truncate(len);
2218    }
2219
2220    /// Clears the buffer, removing all data.
2221    ///
2222    /// # Examples
2223    ///
2224    /// ```
2225    /// use ntex_bytes::BytesVec;
2226    ///
2227    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2228    /// buf.clear();
2229    /// assert!(buf.is_empty());
2230    /// ```
2231    pub fn clear(&mut self) {
2232        self.truncate(0);
2233    }
2234
2235    /// Resizes the buffer so that `len` is equal to `new_len`.
2236    ///
2237    /// If `new_len` is greater than `len`, the buffer is extended by the
2238    /// difference with each additional byte set to `value`. If `new_len` is
2239    /// less than `len`, the buffer is simply truncated.
2240    ///
2241    /// # Panics
2242    ///
    /// Panics if `new_len` is greater than 60 bits for 64-bit systems
    /// and 28 bits for 32-bit systems
2245    ///
2246    /// # Examples
2247    ///
2248    /// ```
2249    /// use ntex_bytes::BytesVec;
2250    ///
2251    /// let mut buf = BytesVec::new();
2252    ///
2253    /// buf.resize(3, 0x1);
2254    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
2255    ///
2256    /// buf.resize(2, 0x2);
2257    /// assert_eq!(&buf[..], &[0x1, 0x1]);
2258    ///
2259    /// buf.resize(4, 0x3);
2260    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
2261    /// ```
2262    #[inline]
2263    pub fn resize(&mut self, new_len: usize, value: u8) {
2264        self.inner.resize(new_len, value);
2265    }
2266
2267    /// Sets the length of the buffer.
2268    ///
2269    /// This will explicitly set the size of the buffer without actually
2270    /// modifying the data, so it is up to the caller to ensure that the data
2271    /// has been initialized.
2272    ///
2273    /// # Examples
2274    ///
2275    /// ```
2276    /// use ntex_bytes::BytesVec;
2277    ///
2278    /// let mut b = BytesVec::copy_from_slice(&b"hello world"[..]);
2279    ///
2280    /// unsafe {
2281    ///     b.set_len(5);
2282    /// }
2283    ///
2284    /// assert_eq!(&b[..], b"hello");
2285    ///
2286    /// unsafe {
2287    ///     b.set_len(11);
2288    /// }
2289    ///
2290    /// assert_eq!(&b[..], b"hello world");
2291    /// ```
2292    ///
2293    /// # Panics
2294    ///
    /// This method will panic if `len` is out of bounds for the underlying
    /// buffer, i.e. if it is greater than the buffer's capacity.
2297    #[inline]
2298    #[allow(clippy::missing_safety_doc)]
2299    pub unsafe fn set_len(&mut self, len: usize) {
2300        self.inner.set_len(len)
2301    }
2302
2303    /// Reserves capacity for at least `additional` more bytes to be inserted
2304    /// into the given `BytesVec`.
2305    ///
2306    /// More than `additional` bytes may be reserved in order to avoid frequent
2307    /// reallocations. A call to `reserve` may result in an allocation.
2308    ///
2309    /// Before allocating new buffer space, the function will attempt to reclaim
2310    /// space in the existing buffer. If the current handle references a small
2311    /// view in the original buffer and all other handles have been dropped,
2312    /// and the requested capacity is less than or equal to the existing
2313    /// buffer's capacity, then the current view will be copied to the front of
2314    /// the buffer and the handle will take ownership of the full buffer.
2315    ///
2316    /// # Panics
2317    ///
    /// Panics if the new capacity is greater than 60 bits for 64-bit systems
    /// and 28 bits for 32-bit systems, or if it overflows `usize`.
2320    ///
2321    /// # Examples
2322    ///
2323    /// In the following example, a new buffer is allocated.
2324    ///
2325    /// ```
2326    /// use ntex_bytes::BytesVec;
2327    ///
2328    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
2329    /// buf.reserve(64);
2330    /// assert!(buf.capacity() >= 69);
2331    /// ```
2332    ///
2333    /// In the following example, the existing buffer is reclaimed.
2334    ///
2335    /// ```
2336    /// use ntex_bytes::{BytesVec, BufMut};
2337    ///
2338    /// let mut buf = BytesVec::with_capacity(128);
2339    /// buf.put(&[0; 64][..]);
2340    ///
2341    /// let ptr = buf.as_ptr();
2342    /// let other = buf.split();
2343    ///
2344    /// assert!(buf.is_empty());
2345    /// assert_eq!(buf.capacity(), 64);
2346    ///
2347    /// drop(other);
2348    /// buf.reserve(128);
2349    ///
2350    /// assert_eq!(buf.capacity(), 128);
2351    /// assert_eq!(buf.as_ptr(), ptr);
2352    /// ```
2357    #[inline]
2358    pub fn reserve(&mut self, additional: usize) {
2359        let len = self.len();
2360        let rem = self.capacity() - len;
2361
2362        if additional <= rem {
2363            // The handle can already store at least `additional` more bytes, so
2364            // there is no further work needed to be done.
2365            return;
2366        }
2367
2368        self.inner.reserve_inner(additional);
2369    }
2370
2371    /// Appends given bytes to this object.
2372    ///
    /// If this `BytesVec` object does not have enough capacity, it is resized
    /// first, so unlike the generic `BufMut::put_slice` contract,
    /// `extend_from_slice` never panics due to insufficient capacity.
2375    ///
2376    /// # Examples
2377    ///
2378    /// ```
2379    /// use ntex_bytes::BytesVec;
2380    ///
2381    /// let mut buf = BytesVec::with_capacity(0);
2382    /// buf.extend_from_slice(b"aaabbb");
2383    /// buf.extend_from_slice(b"cccddd");
2384    ///
2385    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
2386    /// ```
2387    #[inline]
2388    pub fn extend_from_slice(&mut self, extend: &[u8]) {
2389        self.put_slice(extend);
2390    }
2391
    /// Runs the provided function with a `BytesMut` instance that contains the current data.
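    ///
    /// # Examples
    ///
    /// A minimal sketch; the closure receives a temporary `BytesMut` view over
    /// the same data, and its result is returned by `with_bytes_mut`:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// buf.with_bytes_mut(|b| b[0] = b'H');
    /// assert_eq!(&buf[..], b"Hello");
    /// ```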
2393    #[inline]
2394    pub fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2395    where
2396        F: FnOnce(&mut BytesMut) -> R,
2397    {
2398        self.inner.with_bytes_mut(f)
2399    }
2400
2401    /// Returns an iterator over the bytes contained by the buffer.
2402    ///
2403    /// # Examples
2404    ///
2405    /// ```
2406    /// use ntex_bytes::{Buf, BytesVec};
2407    ///
2408    /// let buf = BytesVec::copy_from_slice(&b"abc"[..]);
2409    /// let mut iter = buf.iter();
2410    ///
2411    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
2412    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
2413    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
2414    /// assert_eq!(iter.next(), None);
2415    /// ```
2416    #[inline]
2417    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
2418        self.chunk().iter()
2419    }
2420
2421    #[cfg(feature = "mpool")]
2422    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
2423        self.inner.move_to_pool(pool);
2424    }
2425}
2426
2427impl Buf for BytesVec {
2428    #[inline]
2429    fn remaining(&self) -> usize {
2430        self.len()
2431    }
2432
2433    #[inline]
2434    fn chunk(&self) -> &[u8] {
2435        self.inner.as_ref()
2436    }
2437
2438    #[inline]
2439    fn advance(&mut self, cnt: usize) {
2440        assert!(
2441            cnt <= self.inner.as_ref().len(),
2442            "cannot advance past `remaining`"
2443        );
2444        unsafe {
2445            self.inner.set_start(cnt as u32);
2446        }
2447    }
2448}
2449
2450impl BufMut for BytesVec {
2451    #[inline]
2452    fn remaining_mut(&self) -> usize {
2453        self.capacity() - self.len()
2454    }
2455
2456    #[inline]
2457    unsafe fn advance_mut(&mut self, cnt: usize) {
2458        let new_len = self.len() + cnt;
2459
2460        // This call will panic if `cnt` is too big
2461        self.inner.set_len(new_len);
2462    }
2463
2464    #[inline]
2465    fn chunk_mut(&mut self) -> &mut UninitSlice {
2466        let len = self.len();
2467
2468        unsafe {
2469            // This will never panic as `len` can never become invalid
2470            let ptr = &mut self.inner.as_raw()[len..];
2471
2472            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
2473        }
2474    }
2475
2476    #[inline]
2477    fn put_slice(&mut self, src: &[u8]) {
2478        let len = src.len();
2479        self.reserve(len);
2480
2481        unsafe {
2482            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
2483            self.advance_mut(len);
2484        }
2485    }
2486
2487    #[inline]
2488    fn put_u8(&mut self, n: u8) {
2489        self.reserve(1);
2490        self.inner.put_u8(n);
2491    }
2492
2493    #[inline]
2494    fn put_i8(&mut self, n: i8) {
2495        self.reserve(1);
2496        self.put_u8(n as u8);
2497    }
2498}
2499
2500impl AsRef<[u8]> for BytesVec {
2501    #[inline]
2502    fn as_ref(&self) -> &[u8] {
2503        self.inner.as_ref()
2504    }
2505}
2506
2507impl AsMut<[u8]> for BytesVec {
2508    #[inline]
2509    fn as_mut(&mut self) -> &mut [u8] {
2510        self.inner.as_mut()
2511    }
2512}
2513
2514impl Deref for BytesVec {
2515    type Target = [u8];
2516
2517    #[inline]
2518    fn deref(&self) -> &[u8] {
2519        self.as_ref()
2520    }
2521}
2522
2523impl DerefMut for BytesVec {
2524    #[inline]
2525    fn deref_mut(&mut self) -> &mut [u8] {
2526        self.inner.as_mut()
2527    }
2528}
2529
2530impl Eq for BytesVec {}
2531
2532impl PartialEq for BytesVec {
2533    #[inline]
2534    fn eq(&self, other: &BytesVec) -> bool {
2535        self.inner.as_ref() == other.inner.as_ref()
2536    }
2537}
2538
2539impl Default for BytesVec {
2540    #[inline]
2541    fn default() -> BytesVec {
2542        BytesVec::new()
2543    }
2544}
2545
2546impl Borrow<[u8]> for BytesVec {
2547    #[inline]
2548    fn borrow(&self) -> &[u8] {
2549        self.as_ref()
2550    }
2551}
2552
2553impl BorrowMut<[u8]> for BytesVec {
2554    #[inline]
2555    fn borrow_mut(&mut self) -> &mut [u8] {
2556        self.as_mut()
2557    }
2558}
2559
2560impl fmt::Debug for BytesVec {
2561    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2562        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
2563    }
2564}
2565
2566impl fmt::Write for BytesVec {
2567    #[inline]
2568    fn write_str(&mut self, s: &str) -> fmt::Result {
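        // Note: this does not grow the buffer. `remaining_mut()` is
        // `capacity() - len()`, so a string longer than the spare capacity
        // fails with `fmt::Error` instead of triggering a reallocation.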
2569        if self.remaining_mut() >= s.len() {
2570            self.put_slice(s.as_bytes());
2571            Ok(())
2572        } else {
2573            Err(fmt::Error)
2574        }
2575    }
2576
2577    #[inline]
2578    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
2579        fmt::write(self, args)
2580    }
2581}
2582
2583impl IntoIterator for BytesVec {
2584    type Item = u8;
2585    type IntoIter = IntoIter<BytesVec>;
2586
2587    fn into_iter(self) -> Self::IntoIter {
2588        IntoIter::new(self)
2589    }
2590}
2591
2592impl<'a> IntoIterator for &'a BytesVec {
2593    type Item = &'a u8;
2594    type IntoIter = std::slice::Iter<'a, u8>;
2595
2596    fn into_iter(self) -> Self::IntoIter {
2597        self.as_ref().iter()
2598    }
2599}
2600
2601impl FromIterator<u8> for BytesVec {
2602    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
2603        let iter = into_iter.into_iter();
2604        let (min, maybe_max) = iter.size_hint();
2605
2606        let mut out = BytesVec::with_capacity(maybe_max.unwrap_or(min));
2607        for i in iter {
2608            out.reserve(1);
2609            out.put_u8(i);
2610        }
2611
2612        out
2613    }
2614}
2615
2616impl<'a> FromIterator<&'a u8> for BytesVec {
2617    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
2618        into_iter.into_iter().copied().collect::<BytesVec>()
2619    }
2620}
2621
2622impl Extend<u8> for BytesVec {
2623    fn extend<T>(&mut self, iter: T)
2624    where
2625        T: IntoIterator<Item = u8>,
2626    {
2627        let iter = iter.into_iter();
2628
2629        let (lower, _) = iter.size_hint();
2630        self.reserve(lower);
2631
2632        for b in iter {
2633            self.put_u8(b);
2634        }
2635    }
2636}
2637
2638impl<'a> Extend<&'a u8> for BytesVec {
2639    fn extend<T>(&mut self, iter: T)
2640    where
2641        T: IntoIterator<Item = &'a u8>,
2642    {
2643        self.extend(iter.into_iter().copied())
2644    }
2645}
2646
2647struct InnerVec(NonNull<SharedVec>);
2648
2649impl InnerVec {
2650    #[inline]
2651    fn with_capacity(capacity: usize, pool: PoolRef) -> InnerVec {
2652        Self::from_slice(capacity, &[], pool)
2653    }
2654
2655    #[inline]
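    // Allocation layout: the backing storage is a `Vec<SharedVec>` whose first
    // element is used as the header (ref count, pool, len, offset) and whose
    // remaining capacity holds the byte data, which is why `cap` is rounded up
    // to whole `SHARED_VEC_SIZE` units below.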
2656    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> InnerVec {
2657        // vec must be aligned to SharedVec instead of u8
2658        let vec_cap = if cap % SHARED_VEC_SIZE != 0 {
2659            (cap / SHARED_VEC_SIZE) + 2
2660        } else {
2661            (cap / SHARED_VEC_SIZE) + 1
2662        };
2663        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
2664        unsafe {
2665            // Store data in vec
2666            let len = src.len() as u32;
2667            let cap = vec.capacity() * SHARED_VEC_SIZE;
2668            let shared_ptr = vec.as_mut_ptr();
2669            mem::forget(vec);
2670            pool.acquire(cap);
2671
2672            let ptr = shared_ptr.add(1) as *mut u8;
2673            if !src.is_empty() {
2674                ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
2675            }
2676            ptr::write(
2677                shared_ptr,
2678                SharedVec {
2679                    len,
2680                    cap,
2681                    pool,
2682                    ref_count: AtomicUsize::new(1),
2683                    offset: SHARED_VEC_SIZE as u32,
2684                },
2685            );
2686
2687            InnerVec(NonNull::new_unchecked(shared_ptr))
2688        }
2689    }
2690
2691    #[cfg(feature = "mpool")]
2692    #[inline]
2693    fn move_to_pool(&mut self, pool: PoolRef) {
2694        unsafe {
2695            let inner = self.as_inner();
2696            if pool != inner.pool {
2697                pool.acquire(inner.cap);
2698                let pool = mem::replace(&mut inner.pool, pool);
2699                pool.release(inner.cap);
2700            }
2701        }
2702    }
2703
2704    /// Return a slice for the handle's view into the shared buffer
2705    #[inline]
2706    fn as_ref(&self) -> &[u8] {
2707        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
2708    }
2709
2710    /// Return a mutable slice for the handle's view into the shared buffer
2711    #[inline]
2712    fn as_mut(&mut self) -> &mut [u8] {
2713        unsafe { slice::from_raw_parts_mut(self.as_ptr(), self.len()) }
2714    }
2715
2716    /// Return a mutable slice for the handle's view into the shared buffer
2717    /// including potentially uninitialized bytes.
2718    #[inline]
2719    unsafe fn as_raw(&mut self) -> &mut [u8] {
2720        slice::from_raw_parts_mut(self.as_ptr(), self.capacity())
2721    }
2722
2723    /// Return a raw pointer to data
2724    #[inline]
2725    unsafe fn as_ptr(&self) -> *mut u8 {
2726        (self.0.as_ptr() as *mut u8).add((*self.0.as_ptr()).offset as usize)
2727    }
2728
2729    #[inline]
2730    unsafe fn as_inner(&mut self) -> &mut SharedVec {
2731        self.0.as_mut()
2732    }
2733
2734    /// Insert a byte into the next slot and advance the len by 1.
2735    #[inline]
2736    fn put_u8(&mut self, n: u8) {
2737        unsafe {
2738            let inner = self.as_inner();
2739            let len = inner.len as usize;
2740            assert!(len < (inner.cap - inner.offset as usize));
2741            inner.len += 1;
2742            *self.as_ptr().add(len) = n;
2743        }
2744    }
2745
2746    #[inline]
2747    fn len(&self) -> usize {
2748        unsafe { (*self.0.as_ptr()).len as usize }
2749    }
2750
    /// Sets the length of the handle's view into the underlying buffer.
2752    #[inline]
2753    unsafe fn set_len(&mut self, len: usize) {
2754        let inner = self.as_inner();
2755        assert!(len <= (inner.cap - inner.offset as usize) && len < u32::MAX as usize);
2756        inner.len = len as u32;
2757    }
2758
2759    #[inline]
2760    fn capacity(&self) -> usize {
2761        unsafe { (*self.0.as_ptr()).cap - (*self.0.as_ptr()).offset as usize }
2762    }
2763
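    // Converts this `InnerVec` into an `Inner` (used by `freeze`). Small
    // payloads are copied into the inline representation; otherwise the
    // existing `SharedVec` is handed over by tagging its pointer with
    // `KIND_VEC`, and `self` is forgotten so the ref count is not decremented.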
2764    fn into_inner(mut self) -> Inner {
2765        unsafe {
2766            let ptr = self.as_ptr();
2767
2768            if self.len() <= INLINE_CAP {
2769                Inner::from_ptr_inline(ptr, self.len())
2770            } else {
2771                let inner = self.as_inner();
2772
2773                let inner = Inner {
2774                    ptr,
2775                    len: inner.len as usize,
2776                    cap: inner.cap - inner.offset as usize,
2777                    arc: NonNull::new_unchecked(
2778                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2779                    ),
2780                };
2781                mem::forget(self);
2782                inner
2783            }
2784        }
2785    }
2786
2787    fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2788    where
2789        F: FnOnce(&mut BytesMut) -> R,
2790    {
2791        unsafe {
2792            // create Inner for BytesMut
2793            let ptr = self.as_ptr();
2794            let inner = self.as_inner();
2795            let inner = Inner {
2796                ptr,
2797                len: inner.len as usize,
2798                cap: inner.cap - inner.offset as usize,
2799                arc: NonNull::new_unchecked(
2800                    (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2801                ),
2802            };
2803
2804            // run function
2805            let mut buf = BytesMut { inner };
2806            let result = f(&mut buf);
2807
2808            // convert BytesMut back to InnerVec
2809            let kind = buf.inner.kind();
2810            let new_inner =
                // only a KIND_VEC buffer can be converted back into an `InnerVec`;
                // for any other kind the data has to be copied
2812                if kind == KIND_INLINE || kind == KIND_STATIC || kind == KIND_ARC {
2813                    InnerVec::from_slice(
2814                        buf.inner.capacity(),
2815                        buf.inner.as_ref(),
2816                        buf.inner.pool(),
2817                    )
2818                } else if kind == KIND_VEC {
2819                    let ptr = buf.inner.shared_vec();
2820                    let offset = buf.inner.ptr as usize - ptr as usize;
2821
                    // the shared vec cannot be reused if the `BytesMut` points into the middle of it
2823                    if buf.inner.cap < (*ptr).cap - offset {
2824                        InnerVec::from_slice(
2825                            buf.inner.capacity(),
2826                            buf.inner.as_ref(),
2827                            buf.inner.pool(),
2828                        )
2829                    } else {
                        // the `BytesMut` owns the rest of the vec, so its storage can be reused
2831                        (*ptr).len = buf.len() as u32;
2832                        (*ptr).offset = offset as u32;
2833                        let inner = InnerVec(NonNull::new_unchecked(ptr));
                        mem::forget(buf); // storage reclaimed above; don't release it again
2835                        inner
2836                    }
2837                } else {
2838                    panic!()
2839                };
2840
            // swap in the new inner and forget the old one: its buffer was handed
            // to the `BytesMut` above without incrementing the ref count, so
            // dropping it here would release the shared storage twice
2842            let old = mem::replace(self, new_inner);
2843            mem::forget(old);
2844
2845            result
2846        }
2847    }
2848
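    // Splits off the first `at` bytes as an `Inner`. Small prefixes may be
    // returned inline (without touching the ref count); otherwise the shared
    // ref count is incremented and the returned handle views `[0, at)`, while
    // `self` is advanced past them via `set_start`.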
2849    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
2850        unsafe {
2851            let ptr = self.as_ptr();
2852
2853            let other = if create_inline && at <= INLINE_CAP {
2854                Inner::from_ptr_inline(ptr, at)
2855            } else {
2856                let inner = self.as_inner();
2857                let old_size = inner.ref_count.fetch_add(1, Relaxed);
2858                if old_size == usize::MAX {
2859                    abort();
2860                }
2861
2862                Inner {
2863                    ptr,
2864                    len: at,
2865                    cap: at,
2866                    arc: NonNull::new_unchecked(
2867                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2868                    ),
2869                }
2870            };
2871            self.set_start(at as u32);
2872
2873            other
2874        }
2875    }
2876
2877    fn truncate(&mut self, len: usize) {
2878        unsafe {
2879            if len <= self.len() {
2880                self.set_len(len);
2881            }
2882        }
2883    }
2884
2885    fn resize(&mut self, new_len: usize, value: u8) {
2886        let len = self.len();
2887        if new_len > len {
2888            let additional = new_len - len;
2889            self.reserve(additional);
2890            unsafe {
2891                let dst = self.as_raw()[len..].as_mut_ptr();
2892                ptr::write_bytes(dst, value, additional);
2893                self.set_len(new_len);
2894            }
2895        } else {
2896            self.truncate(new_len);
2897        }
2898    }
2899
2900    #[inline]
2901    fn reserve(&mut self, additional: usize) {
2902        let len = self.len();
2903        let rem = self.capacity() - len;
2904
2905        if additional <= rem {
2906            // The handle can already store at least `additional` more bytes, so
2907            // there is no further work needed to be done.
2908            return;
2909        }
2910
2911        self.reserve_inner(additional)
2912    }
2913
2914    #[inline]
    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. This significantly helps performance.
2917    fn reserve_inner(&mut self, additional: usize) {
2918        let len = self.len();
2919
2920        // Reserving involves abandoning the currently shared buffer and
2921        // allocating a new vector with the requested capacity.
2922        let new_cap = len + additional;
2923
2924        unsafe {
2925            let inner = self.as_inner();
2926            let vec_cap = inner.cap - SHARED_VEC_SIZE;
2927
2928            // try to reclaim the buffer. This is possible if the current
2929            // handle is the only outstanding handle pointing to the buffer.
2930            if inner.is_unique() && vec_cap >= new_cap {
2931                let offset = inner.offset;
2932                inner.offset = SHARED_VEC_SIZE as u32;
2933
2934                // The capacity is sufficient, reclaim the buffer
2935                let src = (self.0.as_ptr() as *mut u8).add(offset as usize);
2936                let dst = (self.0.as_ptr() as *mut u8).add(SHARED_VEC_SIZE);
2937                ptr::copy(src, dst, len);
2938            } else {
2939                // Create a new vector storage
2940                let pool = inner.pool;
2941                *self = InnerVec::from_slice(new_cap, self.as_ref(), pool);
2942            }
2943        }
2944    }
2945
2946    unsafe fn set_start(&mut self, start: u32) {
2947        // Setting the start to 0 is a no-op, so return early if this is the
2948        // case.
2949        if start == 0 {
2950            return;
2951        }
2952
2953        let inner = self.as_inner();
2954        assert!(start <= inner.cap as u32);
2955
2956        // Updating the start of the view is setting `offset` to point to the
2957        // new start and updating the `len` field to reflect the new length
2958        // of the view.
2959        inner.offset += start;
2960
2961        if inner.len >= start {
2962            inner.len -= start;
2963        } else {
2964            inner.len = 0;
2965        }
2966    }
2967}
2968
2969impl Drop for InnerVec {
2970    fn drop(&mut self) {
2971        release_shared_vec(self.0.as_ptr());
2972    }
2973}
2974
2975/*
2976 *
2977 * ===== Inner =====
2978 *
2979 */
2980
2981impl Inner {
2982    #[inline]
2983    const fn from_static(bytes: &'static [u8]) -> Inner {
2984        let ptr = bytes.as_ptr() as *mut u8;
2985
2986        Inner {
2987            // `arc` won't ever store a pointer. Instead, use it to
2988            // track the fact that the `Bytes` handle is backed by a
2989            // static buffer.
2990            arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
2991            ptr,
2992            len: bytes.len(),
2993            cap: bytes.len(),
2994        }
2995    }
2996
2997    #[inline]
2998    const fn empty_inline() -> Inner {
2999        Inner {
3000            arc: unsafe { NonNull::new_unchecked(KIND_INLINE as *mut Shared) },
3001            ptr: 0 as *mut u8,
3002            len: 0,
3003            cap: 0,
3004        }
3005    }
3006
3007    #[inline]
3008    fn from_vec(mut vec: Vec<u8>, pool: PoolRef) -> Inner {
3009        let len = vec.len();
3010        let cap = vec.capacity();
3011        let ptr = vec.as_mut_ptr();
3012        pool.acquire(cap);
3013
3014        // Store data in arc
3015        let shared = Box::into_raw(Box::new(Shared {
3016            vec,
3017            pool,
3018            ref_count: AtomicUsize::new(1),
3019        }));
3020
3021        // The pointer should be aligned, so this assert should always succeed.
3022        debug_assert!(0 == (shared as usize & KIND_MASK));
3023
3024        // Create new arc, so atomic operations can be avoided.
3025        Inner {
3026            ptr,
3027            len,
3028            cap,
3029            arc: unsafe { NonNull::new_unchecked(shared) },
3030        }
3031    }
3032
3033    #[inline]
3034    fn with_capacity(capacity: usize, pool: PoolRef) -> Inner {
3035        Inner::from_slice(capacity, &[], pool)
3036    }
3037
3038    #[inline]
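    // Same single-allocation layout as `InnerVec::from_slice`: the first
    // `SharedVec` element is the header and the remaining capacity holds the
    // data. The header pointer is tagged with `KIND_VEC` (XORed into `arc`)
    // so that `kind()` can recover the storage kind later.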
3039    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> Inner {
3040        // vec must be aligned to SharedVec instead of u8
3041        let mut vec_cap = (cap / SHARED_VEC_SIZE) + 1;
3042        if cap % SHARED_VEC_SIZE != 0 {
3043            vec_cap += 1;
3044        }
3045        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
3046
3047        // Store data in vec
3048        let len = src.len();
3049        let full_cap = vec.capacity() * SHARED_VEC_SIZE;
3050        let cap = full_cap - SHARED_VEC_SIZE;
3051        vec.push(SharedVec {
3052            pool,
3053            cap: full_cap,
3054            ref_count: AtomicUsize::new(1),
3055            len: 0,
3056            offset: 0,
3057        });
3058        pool.acquire(full_cap);
3059
3060        let shared_ptr = vec.as_mut_ptr();
3061        mem::forget(vec);
3062
3063        let (ptr, arc) = unsafe {
3064            let ptr = shared_ptr.add(1) as *mut u8;
3065            ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
3066            let arc =
3067                NonNull::new_unchecked((shared_ptr as usize ^ KIND_VEC) as *mut Shared);
3068            (ptr, arc)
3069        };
3070
3071        // Create new arc, so atomic operations can be avoided.
3072        Inner { len, cap, ptr, arc }
3073    }
3074
3075    #[inline]
3076    fn from_slice_inline(src: &[u8]) -> Inner {
3077        unsafe { Inner::from_ptr_inline(src.as_ptr(), src.len()) }
3078    }
3079
3080    #[inline]
3081    unsafe fn from_ptr_inline(src: *const u8, len: usize) -> Inner {
3082        let mut inner = Inner {
3083            arc: NonNull::new_unchecked(KIND_INLINE as *mut Shared),
3084            ptr: ptr::null_mut(),
3085            len: 0,
3086            cap: 0,
3087        };
3088
3089        let dst = inner.inline_ptr();
3090        ptr::copy(src, dst, len);
3091        inner.set_inline_len(len);
3092        inner
3093    }
3094
3095    #[inline]
3096    fn pool(&self) -> PoolRef {
3097        let kind = self.kind();
3098
3099        if kind == KIND_VEC {
3100            unsafe { (*self.shared_vec()).pool }
3101        } else if kind == KIND_ARC {
3102            unsafe { (*self.arc.as_ptr()).pool }
3103        } else {
3104            PoolId::DEFAULT.pool_ref()
3105        }
3106    }
3107
3108    #[cfg(feature = "mpool")]
3109    #[inline]
3110    fn move_to_pool(&mut self, pool: PoolRef) {
3111        let kind = self.kind();
3112
3113        if kind == KIND_VEC {
3114            let vec = self.shared_vec();
3115            unsafe {
3116                let cap = (*vec).cap;
3117                pool.acquire(cap);
3118                let pool = mem::replace(&mut (*vec).pool, pool);
3119                pool.release(cap);
3120            }
3121        } else if kind == KIND_ARC {
3122            let arc = self.arc.as_ptr();
3123            unsafe {
3124                let cap = (*arc).vec.capacity();
3125                pool.acquire(cap);
3126                let pool = mem::replace(&mut (*arc).pool, pool);
3127                pool.release(cap);
3128            }
3129        }
3130    }
3131
3132    /// Return a slice for the handle's view into the shared buffer
3133    #[inline]
3134    fn as_ref(&self) -> &[u8] {
3135        unsafe {
3136            if self.is_inline() {
3137                slice::from_raw_parts(self.inline_ptr_ro(), self.inline_len())
3138            } else {
3139                slice::from_raw_parts(self.ptr, self.len)
3140            }
3141        }
3142    }
3143
3144    /// Return a mutable slice for the handle's view into the shared buffer
3145    #[inline]
3146    fn as_mut(&mut self) -> &mut [u8] {
3147        debug_assert!(!self.is_static());
3148
3149        unsafe {
3150            if self.is_inline() {
3151                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
3152            } else {
3153                slice::from_raw_parts_mut(self.ptr, self.len)
3154            }
3155        }
3156    }
3157
3158    /// Return a mutable slice for the handle's view into the shared buffer
3159    /// including potentially uninitialized bytes.
3160    #[inline]
3161    unsafe fn as_raw(&mut self) -> &mut [u8] {
3162        debug_assert!(!self.is_static());
3163
3164        if self.is_inline() {
3165            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
3166        } else {
3167            slice::from_raw_parts_mut(self.ptr, self.cap)
3168        }
3169    }
3170
3171    /// Return a raw pointer to data
3172    #[inline]
3173    unsafe fn as_ptr(&mut self) -> *mut u8 {
3174        if self.is_inline() {
3175            self.inline_ptr()
3176        } else {
3177            self.ptr
3178        }
3179    }
3180
3181    /// Insert a byte into the next slot and advance the len by 1.
3182    #[inline]
3183    fn put_u8(&mut self, n: u8) {
3184        if self.is_inline() {
3185            let len = self.inline_len();
3186            assert!(len < INLINE_CAP);
3187            unsafe {
3188                *self.inline_ptr().add(len) = n;
3189            }
3190            self.set_inline_len(len + 1);
3191        } else {
3192            assert!(self.len < self.cap);
3193            unsafe {
3194                *self.ptr.add(self.len) = n;
3195            }
3196            self.len += 1;
3197        }
3198    }
3199
3200    #[inline]
3201    fn len(&self) -> usize {
3202        if self.is_inline() {
3203            self.inline_len()
3204        } else {
3205            self.len
3206        }
3207    }
3208
3209    /// Pointer to the start of the inline buffer
3210    #[inline]
3211    unsafe fn inline_ptr(&mut self) -> *mut u8 {
3212        (self as *mut Inner as *mut u8).offset(INLINE_DATA_OFFSET)
3213    }
3214
3215    /// Pointer to the start of the inline buffer
3216    #[inline]
3217    unsafe fn inline_ptr_ro(&self) -> *const u8 {
3218        (self as *const Inner as *const u8).offset(INLINE_DATA_OFFSET)
3219    }
3220
3221    #[inline]
3222    fn inline_len(&self) -> usize {
        // This is undefined behavior due to a data race, but experimental
3224        // evidence shows that it works in practice (discussion:
3225        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3226        (self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
3227    }
3228
3229    /// Set the length of the inline buffer. This is done by writing to the
3230    /// least significant byte of the `arc` field.
3231    #[inline]
3232    fn set_inline_len(&mut self, len: usize) {
3233        debug_assert!(len <= INLINE_CAP);
3234        self.arc = unsafe {
3235            NonNull::new_unchecked(
3236                ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
3237                    | (len << INLINE_LEN_OFFSET)) as _,
3238            )
3239        };
3240    }
3241
    /// Sets the length of the handle's view into the underlying slice.
3243    #[inline]
3244    unsafe fn set_len(&mut self, len: usize) {
3245        if self.is_inline() {
3246            assert!(len <= INLINE_CAP);
3247            self.set_inline_len(len);
3248        } else {
3249            assert!(len <= self.cap);
3250            self.len = len;
3251        }
3252    }
3253
3254    #[inline]
3255    fn is_empty(&self) -> bool {
3256        self.len() == 0
3257    }
3258
3259    #[inline]
3260    fn capacity(&self) -> usize {
3261        if self.is_inline() {
3262            INLINE_CAP
3263        } else {
3264            self.cap
3265        }
3266    }
3267
3268    fn split_off(&mut self, at: usize, create_inline: bool) -> Inner {
3269        let other = unsafe {
3270            if create_inline && self.len() - at <= INLINE_CAP {
3271                Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at)
3272            } else {
3273                let mut other = self.shallow_clone();
3274                other.set_start(at);
3275                other
3276            }
3277        };
3278        unsafe {
3279            if create_inline && at <= INLINE_CAP {
3280                *self = Inner::from_ptr_inline(self.as_ptr(), at);
3281            } else {
3282                self.set_end(at);
3283            }
3284        }
3285
3286        other
3287    }
3288
3289    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
3290        let other = unsafe {
3291            if create_inline && at <= INLINE_CAP {
3292                Inner::from_ptr_inline(self.as_ptr(), at)
3293            } else {
3294                let mut other = self.shallow_clone();
3295                other.set_end(at);
3296                other
3297            }
3298        };
3299        unsafe {
3300            if create_inline && self.len() - at <= INLINE_CAP {
3301                *self = Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at);
3302            } else {
3303                self.set_start(at);
3304            }
3305        }
3306
3307        other
3308    }
3309
3310    fn truncate(&mut self, len: usize, create_inline: bool) {
3311        unsafe {
3312            if len <= self.len() {
3313                if create_inline && len < INLINE_CAP {
3314                    *self = Inner::from_ptr_inline(self.as_ptr(), len);
3315                } else {
3316                    self.set_len(len);
3317                }
3318            }
3319        }
3320    }
3321
3322    fn resize(&mut self, new_len: usize, value: u8) {
3323        let len = self.len();
3324        if new_len > len {
3325            let additional = new_len - len;
3326            self.reserve(additional);
3327            unsafe {
3328                let dst = self.as_raw()[len..].as_mut_ptr();
3329                ptr::write_bytes(dst, value, additional);
3330                self.set_len(new_len);
3331            }
3332        } else {
3333            self.truncate(new_len, false);
3334        }
3335    }
3336
3337    unsafe fn set_start(&mut self, start: usize) {
3338        // Setting the start to 0 is a no-op, so return early if this is the
3339        // case.
3340        if start == 0 {
3341            return;
3342        }
3343
3344        let kind = self.kind();
3345
3346        // Always check `inline` first, because if the handle is using inline
3347        // data storage, all of the `Inner` struct fields will be gibberish.
3348        if kind == KIND_INLINE {
3349            assert!(start <= INLINE_CAP);
3350
3351            let len = self.inline_len();
3352            if len <= start {
3353                self.set_inline_len(0);
3354            } else {
3355                // `set_start` is essentially shifting data off the front of the
3356                // view. Inlined buffers only track the length of the slice.
3357                // So, to update the start, the data at the new starting point
3358                // is copied to the beginning of the buffer.
3359                let new_len = len - start;
3360
3361                let dst = self.inline_ptr();
3362                let src = (dst as *const u8).add(start);
3363
3364                ptr::copy(src, dst, new_len);
3365
3366                self.set_inline_len(new_len);
3367            }
3368        } else {
3369            assert!(start <= self.cap);
3370
3371            // Updating the start of the view is setting `ptr` to point to the
3372            // new start and updating the `len` field to reflect the new length
3373            // of the view.
3374            self.ptr = self.ptr.add(start);
3375
3376            if self.len >= start {
3377                self.len -= start;
3378            } else {
3379                self.len = 0;
3380            }
3381
3382            self.cap -= start;
3383        }
3384    }
3385
3386    unsafe fn set_end(&mut self, end: usize) {
3387        // Always check `inline` first, because if the handle is using inline
3388        // data storage, all of the `Inner` struct fields will be gibberish.
3389        if self.is_inline() {
3390            assert!(end <= INLINE_CAP);
3391            let new_len = cmp::min(self.inline_len(), end);
3392            self.set_inline_len(new_len);
3393        } else {
3394            assert!(end <= self.cap);
3395
3396            self.cap = end;
3397            self.len = cmp::min(self.len, end);
3398        }
3399    }
3400
3401    /// Checks if it is safe to mutate the memory
3402    fn is_mut_safe(&self) -> bool {
3403        let kind = self.kind();
3404
3405        // Always check `inline` first, because if the handle is using inline
3406        // data storage, all of the `Inner` struct fields will be gibberish.
3407        if kind == KIND_INLINE {
3408            // Inlined buffers can always be mutated as the data is never shared
3409            // across handles.
3410            true
3411        } else if kind == KIND_STATIC {
3412            false
3413        } else if kind == KIND_VEC {
3414            // Otherwise, the underlying buffer is potentially shared with other
3415            // handles, so the ref_count needs to be checked.
3416            unsafe { (*self.shared_vec()).is_unique() }
3417        } else {
3418            // Otherwise, the underlying buffer is potentially shared with other
3419            // handles, so the ref_count needs to be checked.
3420            unsafe { (*self.arc.as_ptr()).is_unique() }
3421        }
3422    }
3423
    /// Increments the ref count. This should only be done if it is known that
    /// it can be done safely. As such, this fn is not public; instead, other
    /// fns use this one while maintaining the guarantees.
3429    ///
3430    /// "Safely" is defined as not exposing two `BytesMut` values that point to
3431    /// the same byte window.
3432    ///
3433    /// This function is thread safe.
3434    unsafe fn shallow_clone(&self) -> Inner {
3435        // Always check `inline` first, because if the handle is using inline
3436        // data storage, all of the `Inner` struct fields will be gibberish.
3437        //
3438        // Additionally, if kind is STATIC, then Arc is *never* changed, making
3439        // it safe and faster to check for it now before an atomic acquire.
3440
3441        if self.is_inline_or_static() {
3442            // In this case, a shallow_clone still involves copying the data.
3443            let mut inner: mem::MaybeUninit<Inner> = mem::MaybeUninit::uninit();
3444            ptr::copy_nonoverlapping(self, inner.as_mut_ptr(), 1);
3445            inner.assume_init()
3446        } else {
3447            self.shallow_clone_sync()
3448        }
3449    }
3450
3451    #[cold]
3452    unsafe fn shallow_clone_sync(&self) -> Inner {
        // The function takes `&self`, which means `shallow_clone` may be
        // called concurrently from multiple threads.
        //
        // The first step is to read the value of `arc`, which determines how to
        // proceed. Incrementing the ref count with `Relaxed` ordering is
        // sufficient here because the caller already holds a live handle to the
        // buffer, so the allocation cannot be freed concurrently (the same
        // reasoning that `Arc::clone` relies on).
3461        let arc: *mut Shared = self.arc.as_ptr();
3462        let kind = arc as usize & KIND_MASK;
3463
3464        if kind == KIND_ARC {
3465            let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
3466            if old_size == usize::MAX {
3467                abort();
3468            }
3469
3470            Inner {
3471                arc: NonNull::new_unchecked(arc),
3472                ..*self
3473            }
3474        } else {
3475            assert!(kind == KIND_VEC);
3476
3477            let vec_arc = (arc as usize & KIND_UNMASK) as *mut SharedVec;
3478            let old_size = (*vec_arc).ref_count.fetch_add(1, Relaxed);
3479            if old_size == usize::MAX {
3480                abort();
3481            }
3482
3483            Inner {
3484                arc: NonNull::new_unchecked(arc),
3485                ..*self
3486            }
3487        }
3488    }
3489
3490    #[inline]
3491    fn reserve(&mut self, additional: usize) {
3492        let len = self.len();
3493        let rem = self.capacity() - len;
3494
3495        if additional <= rem {
3496            // The handle can already store at least `additional` more bytes, so
3497            // there is no further work needed to be done.
3498            return;
3499        }
3500
3501        self.reserve_inner(additional)
3502    }
3503
3504    #[inline]
    // In a separate function to allow the short-circuits in `reserve` to
    // be inline-able. This significantly helps performance.
3507    fn reserve_inner(&mut self, additional: usize) {
3508        let len = self.len();
3509        let kind = self.kind();
3510
3511        // Always check `inline` first, because if the handle is using inline
3512        // data storage, all of the `Inner` struct fields will be gibberish.
3513        if kind == KIND_INLINE {
3514            let new_cap = len + additional;
3515
3516            // Promote to a vector
3517            *self = Inner::from_slice(new_cap, self.as_ref(), PoolId::DEFAULT.pool_ref());
3518            return;
3519        }
3520
3521        // Reserving involves abandoning the currently shared buffer and
3522        // allocating a new vector with the requested capacity.
3523        let new_cap = len + additional;
3524
3525        if kind == KIND_VEC {
3526            let vec = self.shared_vec();
3527
3528            unsafe {
3529                let vec_cap = (*vec).cap - SHARED_VEC_SIZE;
3530
3531                // First, try to reclaim the buffer. This is possible if the current
3532                // handle is the only outstanding handle pointing to the buffer.
3533                if (*vec).is_unique() && vec_cap >= new_cap {
3534                    // The capacity is sufficient, reclaim the buffer
3535                    let ptr = (vec as *mut u8).add(SHARED_VEC_SIZE);
3536                    ptr::copy(self.ptr, ptr, len);
3537
3538                    self.ptr = ptr;
3539                    self.cap = vec_cap;
3540                } else {
3541                    // Create a new vector storage
3542                    *self = Inner::from_slice(new_cap, self.as_ref(), (*vec).pool);
3543                }
3544            }
3545        } else {
3546            debug_assert!(kind == KIND_ARC);
3547
3548            let arc = self.arc.as_ptr();
3549            unsafe {
3550                // First, try to reclaim the buffer. This is possible if the current
3551                // handle is the only outstanding handle pointing to the buffer.
3552                if (*arc).is_unique() {
3553                    // This is the only handle to the buffer. It can be reclaimed.
3554                    // However, before doing the work of copying data, check to make
3555                    // sure that the vector has enough capacity.
3556                    let v = &mut (*arc).vec;
3557
3558                    if v.capacity() >= new_cap {
3559                        // The capacity is sufficient, reclaim the buffer
3560                        let ptr = v.as_mut_ptr();
3561
3562                        ptr::copy(self.ptr, ptr, len);
3563
3564                        self.ptr = ptr;
3565                        self.cap = v.capacity();
3566                        return;
3567                    }
3568                }
3569
3570                // Create a new vector storage
3571                *self = Inner::from_slice(new_cap, self.as_ref(), (*arc).pool);
3572            }
3573        }
3574    }
3575
3576    /// Returns true if the buffer is stored inline
3577    #[inline]
3578    fn is_inline(&self) -> bool {
3579        self.kind() == KIND_INLINE
3580    }
3581
3582    #[inline]
3583    fn is_inline_or_static(&self) -> bool {
3584        // The value returned by `kind` is read without synchronization, but
3585        // it can still be used to decide which operations to perform, some
3586        // of which then access the buffer without further synchronization.
3587        //
3588        // KIND_INLINE and KIND_STATIC can *never* change, so branching on
3589        // that information is safe.
3590        let kind = self.kind();
3591        kind == KIND_INLINE || kind == KIND_STATIC
3592    }
3593
3594    /// Used for `debug_assert` statements
3595    #[inline]
3596    fn is_static(&self) -> bool {
3597        matches!(self.kind(), KIND_STATIC)
3598    }
3599
3600    #[inline]
3601    fn shared_vec(&self) -> *mut SharedVec {
3602        ((self.arc.as_ptr() as usize) & KIND_UNMASK) as *mut SharedVec
3603    }
3604
3605    #[inline]
3606    fn kind(&self) -> usize {
3607        // This function will probably raise some eyebrows. It returns the
3608        // storage kind of the buffer, which is encoded in the least
3609        // significant bits of the `arc` field.
3610        //
3611        // Now, you may notice that the kind is read from `arc` as a plain
3612        // field without any synchronization...
3613        //
3614        // The function only cares about the least significant (tag) bits.
3615        // These bits are set when `Inner` is created and never change after
3616        // that, so reading them through a shared reference needs no atomic
3617        // operations to be correct.
3618        //
3619        // (In the upstream `bytes` crate this field was an `AtomicPtr`, and
3620        // the same trick relied on a benign data race in practice; see
3621        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853
3622        // for the discussion.)
3623        //
3624        // This function is performance critical, as it is called for
3625        // every operation. Performing an atomic load would mess with the
3626        // compiler's ability to optimize. Simple benchmarks show up to a 10%
3627        // slowdown using a `Relaxed` atomic load on x86.
3628
3629        #[cfg(target_endian = "little")]
3630        #[inline]
3631        fn imp(arc: *mut Shared) -> usize {
3632            (arc as usize) & KIND_MASK
3633        }
3634
3635        #[cfg(target_endian = "big")]
3636        #[inline]
3637        fn imp(arc: *mut Shared) -> usize {
3638            unsafe {
3639                let p: *const usize = arc as *const usize;
3640                *p & KIND_MASK
3641            }
3642        }
3643
3644        imp(self.arc.as_ptr())
3645    }
3646}
3647
3648impl Drop for Inner {
3649    fn drop(&mut self) {
3650        let kind = self.kind();
3651
3652        if kind == KIND_VEC {
3653            release_shared_vec(self.shared_vec());
3654        } else if kind == KIND_ARC {
3655            release_shared(self.arc.as_ptr());
3656        }
3657    }
3658}
3659
3660fn release_shared(ptr: *mut Shared) {
3661    // `Shared` storage... follow the drop steps from Arc.
3662    unsafe {
3663        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3664            return;
3665        }
3666
3667        // This fence is needed to prevent reordering of use of the data and
3668        // deletion of the data.  Because it is marked `Release`, the decreasing
3669        // of the reference count synchronizes with this `Acquire` fence. This
3670        // means that use of the data happens before decreasing the reference
3671        // count, which happens before this fence, which happens before the
3672        // deletion of the data.
3673        //
3674        // As explained in the [Boost documentation][1],
3675        //
3676        // > It is important to enforce any possible access to the object in one
3677        // > thread (through an existing reference) to *happen before* deleting
3678        // > the object in a different thread. This is achieved by a "release"
3679        // > operation after dropping a reference (any access to the object
3680        // > through this reference must obviously happened before), and an
3681        // > "acquire" operation before deleting the object.
3682        //
3683        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3684        atomic::fence(Acquire);
3685
3686        // Drop the data
3687        let arc = Box::from_raw(ptr);
3688        arc.pool.release(arc.vec.capacity());
3689    }
3690}
3691
3692fn release_shared_vec(ptr: *mut SharedVec) {
3693    // `SharedVec` storage... follow the drop steps from Arc.
3694    unsafe {
3695        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3696            return;
3697        }
3698
3699        // This fence is needed to prevent reordering of use of the data and
3700        // deletion of the data.  Because it is marked `Release`, the decreasing
3701        // of the reference count synchronizes with this `Acquire` fence. This
3702        // means that use of the data happens before decreasing the reference
3703        // count, which happens before this fence, which happens before the
3704        // deletion of the data.
3705        //
3706        // As explained in the [Boost documentation][1],
3707        //
3708        // > It is important to enforce any possible access to the object in one
3709        // > thread (through an existing reference) to *happen before* deleting
3710        // > the object in a different thread. This is achieved by a "release"
3711        // > operation after dropping a reference (any access to the object
3712        // > through this reference must obviously happened before), and an
3713        // > "acquire" operation before deleting the object.
3714        //
3715        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3716        atomic::fence(Acquire);
3717
3718        // Drop the data
3719        let cap = (*ptr).cap;
3720        (*ptr).pool.release(cap);
3721        ptr::drop_in_place(ptr);
3722        Vec::<u8>::from_raw_parts(ptr as *mut u8, 0, cap);
3723    }
3724}
3725
3726impl Shared {
3727    fn is_unique(&self) -> bool {
3728        // The goal is to check if the current handle is the only handle
3729        // that currently has access to the buffer. This is done by
3730        // checking if the `ref_count` is currently 1.
3731        //
3732        // The `Acquire` ordering synchronizes with the `Release` as
3733        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
3734        // operation guarantees that any mutations done in other threads
3735        // are ordered before the `ref_count` is decremented. As such,
3736        // this `Acquire` will guarantee that those mutations are
3737        // visible to the current thread.
3738        self.ref_count.load(Acquire) == 1
3739    }
3740}
3741
3742impl SharedVec {
3743    fn is_unique(&self) -> bool {
3744        // This is the same as `Shared::is_unique()`, but for `KIND_VEC` storage.
3745        self.ref_count.load(Acquire) == 1
3746    }
3747}
3748
3749unsafe impl Send for Inner {}
3750unsafe impl Sync for Inner {}
3751
3752/*
3753 *
3754 * ===== PartialEq / PartialOrd =====
3755 *
3756 */
3757
3758impl PartialEq<[u8]> for BytesMut {
3759    fn eq(&self, other: &[u8]) -> bool {
3760        &**self == other
3761    }
3762}
3763
3764impl<const N: usize> PartialEq<[u8; N]> for BytesMut {
3765    fn eq(&self, other: &[u8; N]) -> bool {
3766        &**self == other
3767    }
3768}
3769
3770impl PartialEq<BytesMut> for [u8] {
3771    fn eq(&self, other: &BytesMut) -> bool {
3772        *other == *self
3773    }
3774}
3775
3776impl<const N: usize> PartialEq<BytesMut> for [u8; N] {
3777    fn eq(&self, other: &BytesMut) -> bool {
3778        *other == *self
3779    }
3780}
3781
3782impl<'a, const N: usize> PartialEq<BytesMut> for &'a [u8; N] {
3783    fn eq(&self, other: &BytesMut) -> bool {
3784        *other == *self
3785    }
3786}
3787
3788impl PartialEq<str> for BytesMut {
3789    fn eq(&self, other: &str) -> bool {
3790        &**self == other.as_bytes()
3791    }
3792}
3793
3794impl PartialEq<BytesMut> for str {
3795    fn eq(&self, other: &BytesMut) -> bool {
3796        *other == *self
3797    }
3798}
3799
3800impl PartialEq<Vec<u8>> for BytesMut {
3801    fn eq(&self, other: &Vec<u8>) -> bool {
3802        *self == other[..]
3803    }
3804}
3805
3806impl PartialEq<BytesMut> for Vec<u8> {
3807    fn eq(&self, other: &BytesMut) -> bool {
3808        *other == *self
3809    }
3810}
3811
3812impl PartialEq<String> for BytesMut {
3813    fn eq(&self, other: &String) -> bool {
3814        *self == other[..]
3815    }
3816}
3817
3818impl PartialEq<BytesMut> for String {
3819    fn eq(&self, other: &BytesMut) -> bool {
3820        *other == *self
3821    }
3822}
3823
3824impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
3825where
3826    BytesMut: PartialEq<T>,
3827{
3828    fn eq(&self, other: &&'a T) -> bool {
3829        *self == **other
3830    }
3831}
3832
3833impl PartialEq<BytesMut> for &[u8] {
3834    fn eq(&self, other: &BytesMut) -> bool {
3835        *other == *self
3836    }
3837}
3838
3839impl PartialEq<BytesMut> for &str {
3840    fn eq(&self, other: &BytesMut) -> bool {
3841        *other == *self
3842    }
3843}
3844
3845impl PartialEq<[u8]> for Bytes {
3846    fn eq(&self, other: &[u8]) -> bool {
3847        self.inner.as_ref() == other
3848    }
3849}
3850
3851impl<const N: usize> PartialEq<[u8; N]> for Bytes {
3852    fn eq(&self, other: &[u8; N]) -> bool {
3853        self.inner.as_ref() == other.as_ref()
3854    }
3855}
3856
3857impl PartialOrd<[u8]> for Bytes {
3858    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
3859        self.inner.as_ref().partial_cmp(other)
3860    }
3861}
3862
3863impl<const N: usize> PartialOrd<[u8; N]> for Bytes {
3864    fn partial_cmp(&self, other: &[u8; N]) -> Option<cmp::Ordering> {
3865        self.inner.as_ref().partial_cmp(other.as_ref())
3866    }
3867}
3868
3869impl PartialEq<Bytes> for [u8] {
3870    fn eq(&self, other: &Bytes) -> bool {
3871        *other == *self
3872    }
3873}
3874
3875impl<const N: usize> PartialEq<Bytes> for [u8; N] {
3876    fn eq(&self, other: &Bytes) -> bool {
3877        *other == *self
3878    }
3879}
3880
3881impl<'a, const N: usize> PartialEq<Bytes> for &'a [u8; N] {
3882    fn eq(&self, other: &Bytes) -> bool {
3883        *other == *self
3884    }
3885}
3886
3887impl PartialOrd<Bytes> for [u8] {
3888    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3889        other.partial_cmp(self)
3890    }
3891}
3892
3893impl<const N: usize> PartialOrd<Bytes> for [u8; N] {
3894    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3895        other.partial_cmp(self)
3896    }
3897}
3898
3899impl PartialEq<str> for Bytes {
3900    fn eq(&self, other: &str) -> bool {
3901        self.inner.as_ref() == other.as_bytes()
3902    }
3903}
3904
3905impl PartialOrd<str> for Bytes {
3906    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
3907        self.inner.as_ref().partial_cmp(other.as_bytes())
3908    }
3909}
3910
3911impl PartialEq<Bytes> for str {
3912    fn eq(&self, other: &Bytes) -> bool {
3913        *other == *self
3914    }
3915}
3916
3917impl PartialOrd<Bytes> for str {
3918    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3919        other.partial_cmp(self)
3920    }
3921}
3922
3923impl PartialEq<Vec<u8>> for Bytes {
3924    fn eq(&self, other: &Vec<u8>) -> bool {
3925        *self == other[..]
3926    }
3927}
3928
3929impl PartialOrd<Vec<u8>> for Bytes {
3930    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
3931        self.inner.as_ref().partial_cmp(&other[..])
3932    }
3933}
3934
3935impl PartialEq<Bytes> for Vec<u8> {
3936    fn eq(&self, other: &Bytes) -> bool {
3937        *other == *self
3938    }
3939}
3940
3941impl PartialOrd<Bytes> for Vec<u8> {
3942    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3943        other.partial_cmp(self)
3944    }
3945}
3946
3947impl PartialEq<String> for Bytes {
3948    fn eq(&self, other: &String) -> bool {
3949        *self == other[..]
3950    }
3951}
3952
3953impl PartialOrd<String> for Bytes {
3954    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
3955        self.inner.as_ref().partial_cmp(other.as_bytes())
3956    }
3957}
3958
3959impl PartialEq<Bytes> for String {
3960    fn eq(&self, other: &Bytes) -> bool {
3961        *other == *self
3962    }
3963}
3964
3965impl PartialOrd<Bytes> for String {
3966    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3967        other.partial_cmp(self)
3968    }
3969}
3970
3971impl PartialEq<Bytes> for &[u8] {
3972    fn eq(&self, other: &Bytes) -> bool {
3973        *other == *self
3974    }
3975}
3976
3977impl PartialOrd<Bytes> for &[u8] {
3978    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3979        other.partial_cmp(self)
3980    }
3981}
3982
3983impl PartialEq<Bytes> for &str {
3984    fn eq(&self, other: &Bytes) -> bool {
3985        *other == *self
3986    }
3987}
3988
3989impl PartialOrd<Bytes> for &str {
3990    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3991        other.partial_cmp(self)
3992    }
3993}
3994
3995impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
3996where
3997    Bytes: PartialEq<T>,
3998{
3999    fn eq(&self, other: &&'a T) -> bool {
4000        *self == **other
4001    }
4002}
4003
4004impl From<BytesVec> for Bytes {
4005    fn from(b: BytesVec) -> Self {
4006        b.freeze()
4007    }
4008}
4009
4010impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
4011where
4012    Bytes: PartialOrd<T>,
4013{
4014    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
4015        self.partial_cmp(&**other)
4016    }
4017}
4018
4019impl PartialEq<BytesMut> for Bytes {
4020    fn eq(&self, other: &BytesMut) -> bool {
4021        other[..] == self[..]
4022    }
4023}
4024
4025impl PartialEq<BytesVec> for Bytes {
4026    fn eq(&self, other: &BytesVec) -> bool {
4027        other[..] == self[..]
4028    }
4029}
4030
4031impl PartialEq<Bytes> for BytesVec {
4032    fn eq(&self, other: &Bytes) -> bool {
4033        other[..] == self[..]
4034    }
4035}
4036
4037impl PartialEq<Bytes> for BytesMut {
4038    fn eq(&self, other: &Bytes) -> bool {
4039        other[..] == self[..]
4040    }
4041}
4042
4043impl PartialEq<BytesMut> for BytesVec {
4044    fn eq(&self, other: &BytesMut) -> bool {
4045        other[..] == self[..]
4046    }
4047}
4048
4049impl PartialEq<BytesVec> for BytesMut {
4050    fn eq(&self, other: &BytesVec) -> bool {
4051        other[..] == self[..]
4052    }
4053}
4054
4055impl PartialEq<[u8]> for BytesVec {
4056    fn eq(&self, other: &[u8]) -> bool {
4057        &**self == other
4058    }
4059}
4060
4061impl<const N: usize> PartialEq<[u8; N]> for BytesVec {
4062    fn eq(&self, other: &[u8; N]) -> bool {
4063        &**self == other
4064    }
4065}
4066
4067impl PartialEq<BytesVec> for [u8] {
4068    fn eq(&self, other: &BytesVec) -> bool {
4069        *other == *self
4070    }
4071}
4072
4073impl<const N: usize> PartialEq<BytesVec> for [u8; N] {
4074    fn eq(&self, other: &BytesVec) -> bool {
4075        *other == *self
4076    }
4077}
4078
4079impl<'a, const N: usize> PartialEq<BytesVec> for &'a [u8; N] {
4080    fn eq(&self, other: &BytesVec) -> bool {
4081        *other == *self
4082    }
4083}
4084
4085impl PartialEq<str> for BytesVec {
4086    fn eq(&self, other: &str) -> bool {
4087        &**self == other.as_bytes()
4088    }
4089}
4090
4091impl PartialEq<BytesVec> for str {
4092    fn eq(&self, other: &BytesVec) -> bool {
4093        *other == *self
4094    }
4095}
4096
4097impl PartialEq<Vec<u8>> for BytesVec {
4098    fn eq(&self, other: &Vec<u8>) -> bool {
4099        *self == other[..]
4100    }
4101}
4102
4103impl PartialEq<BytesVec> for Vec<u8> {
4104    fn eq(&self, other: &BytesVec) -> bool {
4105        *other == *self
4106    }
4107}
4108
4109impl PartialEq<String> for BytesVec {
4110    fn eq(&self, other: &String) -> bool {
4111        *self == other[..]
4112    }
4113}
4114
4115impl PartialEq<BytesVec> for String {
4116    fn eq(&self, other: &BytesVec) -> bool {
4117        *other == *self
4118    }
4119}
4120
4121impl<'a, T: ?Sized> PartialEq<&'a T> for BytesVec
4122where
4123    BytesVec: PartialEq<T>,
4124{
4125    fn eq(&self, other: &&'a T) -> bool {
4126        *self == **other
4127    }
4128}
4129
4130impl PartialEq<BytesVec> for &[u8] {
4131    fn eq(&self, other: &BytesVec) -> bool {
4132        *other == *self
4133    }
4134}
4135
4136impl PartialEq<BytesVec> for &str {
4137    fn eq(&self, other: &BytesVec) -> bool {
4138        *other == *self
4139    }
4140}
4141
4142// While there is `std::process::abort`, it's only available in Rust 1.17, and
4143// our minimum supported version is currently 1.15. So, this acts as an abort
4144// by triggering a double panic, which always aborts in Rust.
4145struct Abort;
4146
4147impl Drop for Abort {
4148    fn drop(&mut self) {
4149        panic!();
4150    }
4151}
4152
4153#[inline(never)]
4154#[cold]
4155fn abort() {
4156    let _a = Abort;
4157    panic!();
4158}
4159
4160#[cfg(test)]
4161mod tests {
4162    use std::collections::HashMap;
4163
4164    use super::*;
4165
4166    const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4167        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4168        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb";
4169
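    // Illustrative sketch of the shared (reference counted) storage: cloning a
    // heap-backed `Bytes` bumps the shared reference count instead of copying,
    // and the storage stays alive until the last handle is dropped (see
    // `shallow_clone_sync` / `release_shared` above).
    #[test]
    fn shared_clone() {
        let a = Bytes::from(LONG.to_vec());
        let b = a.clone();

        // Both handles view the same underlying memory (zero-copy clone).
        assert_eq!(a.as_ptr(), b.as_ptr());

        // Dropping one handle leaves the other fully usable.
        drop(a);
        assert_eq!(b, LONG);
    }
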
4170    #[test]
4171    fn trimdown() {
4172        let mut b = Bytes::from(LONG.to_vec());
4173        assert_eq!(b.inner.capacity(), 263);
4174        unsafe { b.inner.set_len(68) };
4175        assert_eq!(b.len(), 68);
4176        assert_eq!(b.inner.capacity(), 263);
4177        b.trimdown();
4178        assert_eq!(b.inner.capacity(), 96);
4179
4180        unsafe { b.inner.set_len(16) };
4181        b.trimdown();
4182        assert!(b.is_inline());
4183    }
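
    // A minimal sketch of the `reserve` fast path implemented in
    // `Inner::reserve` above: if the requested space already fits within the
    // current capacity, nothing is moved or reallocated; otherwise the buffer
    // is grown. Assumes `BytesMut::reserve`/`capacity` behave like their
    // `bytes` crate counterparts.
    #[test]
    fn reserve_growth() {
        let mut b = BytesMut::with_capacity(64);
        <BytesMut as BufMut>::put_slice(&mut b, b"hello");

        // Fits within the existing capacity: the short-circuit returns early
        // and the data is not moved.
        let ptr = b.as_ptr();
        b.reserve(8);
        assert_eq!(b.as_ptr(), ptr);

        // Exceeds the current capacity: the buffer is regrown, preserving the
        // contents.
        b.reserve(1024);
        assert!(b.capacity() - b.len() >= 1024);
        assert_eq!(b, b"hello".as_ref());
    }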
4184
4185    #[test]
4186    #[allow(
4187        clippy::len_zero,
4188        clippy::nonminimal_bool,
4189        clippy::unnecessary_fallible_conversions
4190    )]
4191    fn bytes() {
4192        let mut b = Bytes::from(LONG.to_vec());
4193        b.clear();
4194        assert!(b.is_inline());
4195        assert!(b.is_empty());
4196        assert!(b.len() == 0);
4197
4198        let b = Bytes::from(&Bytes::from(LONG));
4199        assert_eq!(b, LONG);
4200
4201        let b = Bytes::from(BytesMut::from(LONG));
4202        assert_eq!(b, LONG);
4203
4204        let mut b: Bytes = BytesMut::try_from(b).unwrap().freeze();
4205        assert_eq!(b, LONG);
4206        assert!(!(b > b));
4207        assert_eq!(<Bytes as Buf>::remaining(&b), LONG.len());
4208        assert_eq!(<Bytes as Buf>::chunk(&b), LONG);
4209        <Bytes as Buf>::advance(&mut b, 10);
4210        assert_eq!(Buf::chunk(&b), &LONG[10..]);
4211
4212        let mut h: HashMap<Bytes, usize> = HashMap::default();
4213        h.insert(b.clone(), 1);
4214        assert_eq!(h.get(&b), Some(&1));
4215
4216        let mut b = BytesMut::try_from(LONG).unwrap();
4217        assert_eq!(b, LONG);
4218        assert_eq!(<BytesMut as Buf>::remaining(&b), LONG.len());
4219        assert_eq!(<BytesMut as BufMut>::remaining_mut(&b), 25);
4220        assert_eq!(<BytesMut as Buf>::chunk(&b), LONG);
4221        <BytesMut as Buf>::advance(&mut b, 10);
4222        assert_eq!(<BytesMut as Buf>::chunk(&b), &LONG[10..]);
4223
4224        let mut b = BytesMut::with_capacity(12);
4225        <BytesMut as BufMut>::put_i8(&mut b, 1);
4226        assert_eq!(b, b"\x01".as_ref());
4227        <BytesMut as BufMut>::put_u8(&mut b, 2);
4228        assert_eq!(b, b"\x01\x02".as_ref());
4229        <BytesMut as BufMut>::put_slice(&mut b, b"12345");
4230        assert_eq!(b, b"\x01\x0212345".as_ref());
4231        <BytesMut as BufMut>::chunk_mut(&mut b).write_byte(0, b'1');
4232        unsafe { <BytesMut as BufMut>::advance_mut(&mut b, 1) };
4233        assert_eq!(b, b"\x01\x02123451".as_ref());
4234    }
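
    // Sketch of the shared-storage branch described in `Inner::reserve_inner`:
    // when the backing buffer is shared with another handle, growing one
    // handle must not disturb the other, so fresh storage is allocated for it
    // instead of reclaiming the shared buffer.
    #[test]
    fn reserve_shared_buffer() {
        let mut a = BytesMut::from(LONG);
        let b = a.split_to(100);

        // `a` and `b` share the same backing storage here, so `a` cannot
        // reclaim it in place; reserving moves `a` to new storage.
        a.reserve(1024);
        assert!(a.capacity() - a.len() >= 1024);

        // Both views still see their original contents.
        assert_eq!(a, &LONG[100..]);
        assert_eq!(b, &LONG[..100]);
    }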
4235
4236    #[test]
4237    #[allow(clippy::unnecessary_fallible_conversions)]
4238    fn bytes_vec() {
4239        let bv = BytesVec::copy_from_slice(LONG);
4240        // SharedVec size is 32
4241        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>() * 9);
4242        assert_eq!(bv.len(), 263);
4243        assert_eq!(bv.as_ref().len(), 263);
4244        assert_eq!(bv.as_ref(), LONG);
4245
4246        let mut bv = BytesVec::copy_from_slice(&b"hello"[..]);
4247        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>());
4248        assert_eq!(bv.len(), 5);
4249        assert_eq!(bv.as_ref().len(), 5);
4250        assert_eq!(bv.as_ref()[0], b"h"[0]);
4251        bv.put_u8(b" "[0]);
4252        assert_eq!(bv.as_ref(), &b"hello "[..]);
4253        bv.put("world");
4254        assert_eq!(bv, "hello world");
4255
4256        let b = Bytes::from(bv);
4257        assert_eq!(b, "hello world");
4258
4259        let mut b = BytesMut::try_from(b).unwrap();
4260        b.put(".");
4261        assert_eq!(b, "hello world.");
4262    }
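
    // Usage sketch for the cross-type `PartialEq`/`PartialOrd` impls defined
    // above: `Bytes`, `BytesMut` and `BytesVec` compare directly against
    // slices, arrays, strings and vectors (equality in both directions).
    #[test]
    fn cross_type_comparisons() {
        let b = Bytes::from(&b"hello"[..]);
        assert_eq!(b, "hello");
        assert_eq!("hello", b);
        assert_eq!(b, *b"hello");
        assert_eq!(b, Vec::from(&b"hello"[..]));
        assert_eq!(b, String::from("hello"));
        assert!(b < "hellp");

        let m = BytesMut::from(&b"hello"[..]);
        assert_eq!(m, "hello");
        assert_eq!(m, b);
        assert_eq!(b, m);

        let v = BytesVec::copy_from_slice(&b"hello"[..]);
        assert_eq!(v, "hello");
        assert_eq!(v, b);
        assert_eq!(v, m);
    }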
4263}