ntex_bytes/
bytes.rs

1use std::borrow::{Borrow, BorrowMut};
2use std::ops::{Deref, DerefMut, RangeBounds};
3use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
4use std::sync::atomic::{self, AtomicUsize};
5use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice};
6
7use crate::pool::{PoolId, PoolRef};
8use crate::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut};
9
10/// A reference counted contiguous slice of memory.
11///
12/// `Bytes` is an efficient container for storing and operating on contiguous
13/// slices of memory. It is intended for use primarily in networking code, but
14/// could have applications elsewhere as well.
15///
16/// `Bytes` values facilitate zero-copy network programming by allowing multiple
17/// `Bytes` objects to point to the same underlying memory. This is managed by
18/// using a reference count to track when the memory is no longer needed and can
19/// be freed.
20///
21/// ```
22/// use ntex_bytes::Bytes;
23///
24/// let mut mem = Bytes::from(&b"Hello world"[..]);
25/// let a = mem.slice(0..5);
26///
27/// assert_eq!(a, b"Hello");
28///
29/// let b = mem.split_to(6);
30///
31/// assert_eq!(mem, b"world");
32/// assert_eq!(b, b"Hello ");
33/// ```
34///
35/// # Memory layout
36///
/// The `Bytes` struct itself is fairly small, limited to four pointer-sized
/// fields used to track information about which segment of the underlying
/// memory the `Bytes` handle has access to.
40///
41/// The memory layout looks like this:
42///
43/// ```text
44/// +-------+
45/// | Bytes |
46/// +-------+
47///  /      \_____
48/// |              \
49/// v               v
50/// +-----+------------------------------------+
51/// | Arc |         |      Data     |          |
52/// +-----+------------------------------------+
53/// ```
54///
55/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
56/// slice and a pointer to the start of the region visible by the handle.
57/// `Bytes` also tracks the length of its view into the memory.
58///
59/// # Sharing
60///
61/// The memory itself is reference counted, and multiple `Bytes` objects may
/// point to the same region. Each `Bytes` handle may point to a different section
/// of the memory region, and the handles may or may not have overlapping views
/// into the memory.
65///
66///
67/// ```text
68///
69///    Arc ptrs                   +---------+
70///    ________________________ / | Bytes 2 |
71///   /                           +---------+
72///  /          +-----------+     |         |
73/// |_________/ |  Bytes 1  |     |         |
74/// |           +-----------+     |         |
75/// |           |           | ___/ data     | tail
76/// |      data |      tail |/              |
77/// v           v           v               v
78/// +-----+---------------------------------+-----+
79/// | Arc |     |           |               |     |
80/// +-----+---------------------------------+-----+
81/// ```
82///
83/// # Mutating
84///
85/// While `Bytes` handles may potentially represent overlapping views of the
86/// underlying memory slice and may not be mutated, `BytesMut` handles are
87/// guaranteed to be the only handle able to view that slice of memory. As such,
88/// `BytesMut` handles are able to mutate the underlying memory. Note that
89/// holding a unique view to a region of memory does not mean that there are no
90/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
91/// memory.
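///
/// A short sketch of what that means in practice (using only the `split_to`
/// and indexing APIs shown in this module):
///
/// ```
/// use ntex_bytes::BytesMut;
///
/// let mut buf = BytesMut::from(&b"hello world"[..]);
/// // After the split there are two unique handles over disjoint regions,
/// // so both may be mutated independently.
/// let mut head = buf.split_to(5);
/// head[0] = b'H';
/// buf[0] = b'_';
///
/// assert_eq!(&head[..], b"Hello");
/// assert_eq!(&buf[..], b"_world");
/// ```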
92///
93/// # Inline bytes
94///
/// As an optimization, when the slice referenced by a `Bytes` handle is small
/// enough [^1], the data is stored inline in the handle itself instead of on
/// the heap. In this case, a clone is no longer "shallow" and the data will be
/// copied. `BytesMut` does not support data inlining and always allocates, but
/// small buffers may be inlined during conversion to `Bytes` (from a `BytesMut`
/// or a `Vec`).
100///
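/// A small illustration (a sketch relying only on `copy_from_slice` and
/// `is_inline`, both defined below):
///
/// ```
/// use ntex_bytes::Bytes;
///
/// let small = Bytes::copy_from_slice(b"tiny");          // stored inline
/// let large = Bytes::copy_from_slice(&[0u8; 128][..]);  // heap allocated
/// assert!(small.is_inline());
/// assert!(!large.is_inline());
/// ```
///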
101/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
102///
103pub struct Bytes {
104    inner: Inner,
105}
106
107/// A unique reference to a contiguous slice of memory.
108///
109/// `BytesMut` represents a unique view into a potentially shared memory region.
110/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations.
113///
114/// For more detail, see [Bytes](struct.Bytes.html).
115///
116/// # Growth
117///
118/// One key difference from `Vec<u8>` is that most operations **do not
119/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
120/// world");` could panic if `my_bytes` does not have enough capacity. Before
121/// writing to the buffer, ensure that there is enough remaining capacity by
122/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
123/// is preferable.
124///
125/// The only exception is `extend` which implicitly reserves required capacity.
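///
/// For example, capacity can be checked and reserved explicitly before writing
/// (a sketch using only methods documented on this type):
///
/// ```
/// use ntex_bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(8);
/// if buf.remaining_mut() < 16 {
///     buf.reserve(16);
/// }
/// buf.put_slice(b"0123456789abcdef");
/// assert_eq!(buf.len(), 16);
/// ```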
126///
127/// # Examples
128///
129/// ```
130/// use ntex_bytes::{BytesMut, BufMut};
131///
132/// let mut buf = BytesMut::with_capacity(64);
133///
134/// buf.put_u8(b'h');
135/// buf.put_u8(b'e');
136/// buf.put("llo");
137///
138/// assert_eq!(buf, b"hello");
139///
140/// // Freeze the buffer so that it can be shared
141/// let a = buf.freeze();
142///
143/// // This does not allocate, instead `b` points to the same memory.
144/// let b = a.clone();
145///
146/// assert_eq!(a, b"hello");
147/// assert_eq!(b, b"hello");
148/// ```
149pub struct BytesMut {
150    inner: Inner,
151}
152
153/// A unique reference to a contiguous slice of memory.
154///
155/// `BytesVec` represents a unique view into a potentially shared memory region.
156/// Given the uniqueness guarantee, owners of `BytesVec` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations. Unlike `Bytes`, it never stores data inline and always allocates.
159///
160/// For more detail, see [Bytes](struct.Bytes.html).
161///
162/// # Growth
163///
164/// One key difference from `Vec<u8>` is that most operations **do not
165/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
166/// world");` could panic if `my_bytes` does not have enough capacity. Before
167/// writing to the buffer, ensure that there is enough remaining capacity by
168/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
169/// is preferable.
170///
171/// The only exception is `extend` which implicitly reserves required capacity.
172///
173/// # Examples
174///
175/// ```
176/// use ntex_bytes::{BytesVec, BufMut};
177///
178/// let mut buf = BytesVec::with_capacity(64);
179///
180/// buf.put_u8(b'h');
181/// buf.put_u8(b'e');
182/// buf.put("llo");
183///
184/// assert_eq!(&buf[..], b"hello");
185///
186/// // Freeze the buffer so that it can be shared
187/// let a = buf.freeze();
188///
189/// // This does not allocate, instead `b` points to the same memory.
190/// let b = a.clone();
191///
192/// assert_eq!(a, b"hello");
193/// assert_eq!(b, b"hello");
194/// ```
195pub struct BytesVec {
196    inner: InnerVec,
197}
198
199// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
200// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
201// that mutate the underlying buffer are only performed when the data range
202// being mutated is only available via a single `BytesMut` handle.
203//
204// # Data storage modes
205//
206// The goal of `bytes` is to be as efficient as possible across a wide range of
207// potential usage patterns. As such, `bytes` needs to be able to handle buffers
208// that are never shared, shared on a single thread, and shared across many
209// threads. `bytes` also needs to handle both tiny buffers as well as very large
210// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
// been known to be in the hundreds of megabytes, and HTTP header values can be a
212// few characters in size.
213//
214// To achieve high performance in these various situations, `Bytes` and
215// `BytesMut` use different strategies for storing the buffer depending on the
216// usage pattern.
217//
218// ## Delayed `Arc` allocation
219//
220// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
221// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
222// not used and the buffer is backed by a `Vec<u8>` directly. Using an
223// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
224// shared, that allocation is avoided.
225//
226// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
227// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
228// into an `Arc` and both the original handle and the new handle use the same
229// buffer via the `Arc`.
230//
231// * `Arc` is being used to signify an atomically reference counted cell. We
232// don't use the `Arc` implementation provided by `std` and instead use our own.
233// This ends up simplifying a number of the `unsafe` code snippets.
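//
// Illustrative sketch of the promotion from the caller's point of view
// (hypothetical user code, not part of this module):
//
//     let mut buf = BytesMut::with_capacity(1024); // plain Vec-backed buffer
//     let tail = buf.split_off(512);               // first split promotes the
//                                                  // storage to shared state
//     drop(tail);                                  // back to a single handle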
234//
235// ## Inlining small buffers
236//
237// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
238// systems, this ends up being 32 bytes, which is actually a lot of storage for
239// cases where `Bytes` is being used to represent small byte strings, such as
240// HTTP header names and values.
241//
242// To avoid any allocation at all in these cases, `Bytes` will use the struct
243// itself for storing the buffer, reserving 1 byte for meta data. This means
244// that, on 64 bit systems, 31 byte buffers require no allocation at all.
245//
// The byte used for metadata stores a 2-bit flag indicating that the buffer is
// stored inline, as well as 6 bits for tracking the buffer length (the return
// value of `Bytes::len`).
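//
// Illustrative layout of that metadata byte (the low byte of the `arc` field;
// see the `INLINE_*` and `KIND_*` constants below):
//
//     bits 7..=2: inline length (`Bytes::len`, at most INLINE_CAP)
//     bits 1..=0: kind flag, KIND_INLINE (0b01)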
249//
250// ## Static buffers
251//
252// `Bytes` can also represent a static buffer, which is created with
253// `Bytes::from_static`. No copying or allocations are required for tracking
254// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
255// tracking that the `Bytes` instance represents a static buffer is stored in
256// the `Bytes` struct.
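//
// For instance (illustrative): `Bytes::from_static(b"GET")` stores only the
// slice pointer, the length, and the KIND_STATIC flag, so cloning or dropping
// it never touches an allocator or a reference count:
//
//     let verb = Bytes::from_static(b"GET");
//     let verb2 = verb.clone(); // plain field copy, no ref count to bump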
257//
258// # Struct layout
259//
260// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
261// data fields as well as all of the function implementations.
262//
263// The `Inner` struct is carefully laid out in order to support the
264// functionality described above as well as being as small as possible. Size is
265// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
266// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
267// map structure.
268//
269// The `Inner` struct contains the following fields:
270//
271// * `ptr: *mut u8`
272// * `len: usize`
273// * `cap: usize`
274// * `arc: *mut Shared`
275//
276// ## `ptr: *mut u8`
277//
278// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
279// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
280// may have been shifted to point somewhere inside the buffer.
281//
282// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
283//
284// ## `len: usize`
285//
286// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
287// always the `Vec`'s length. The slice represented by `ptr` and `len` should
288// (ideally) always be initialized memory.
289//
290// When in "inlined" mode, `len` is used as part of the inlined buffer.
291//
292// ## `cap: usize`
293//
294// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
295// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
296// may or may not be initialized memory.
297//
298// When in "inlined" mode, `cap` is used as part of the inlined buffer.
299//
300// ## `arc: *mut Shared`
301//
302// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
303// will be the pointer to the `Arc` structure tracking the ref count for the
304// underlying buffer. When the pointer is null, then the `Arc` has not been
305// allocated yet and `self` is the only outstanding handle for the underlying
306// buffer.
307//
308// The lower two bits of `arc` are used to track the storage mode of `Inner`.
309// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
310// indicates vector storage, not yet promoted to Arc.  Since pointers to
311// allocated structures are aligned, the lower two bits of a pointer will always
312// be 0. This allows disambiguating between a pointer and the two flags.
313//
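// A sketch of how those low bits can be read (illustrative; the real
// accessors live on `Inner`):
//
//     let arc_val = inner.arc.as_ptr() as usize;
//     let kind = arc_val & KIND_MASK;                          // storage mode
//     let inline_len = (arc_val & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET;
//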
314// When in "inlined" mode, the least significant byte of `arc` is also used to
315// store the length of the buffer view (vs. the capacity, which is a constant).
316//
317// The rest of `arc`'s bytes are used as part of the inline buffer, which means
318// that those bytes need to be located next to the `ptr`, `len`, and `cap`
319// fields, which make up the rest of the inline buffer. This requires special
320// casing the layout of `Inner` depending on if the target platform is big or
321// little endian.
322//
323// On little endian platforms, the `arc` field must be the first field in the
324// struct. On big endian platforms, the `arc` field must be the last field in
325// the struct. Since a deterministic struct layout is required, `Inner` is
326// annotated with `#[repr(C)]`.
327//
328// # Thread safety
329//
330// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
331// by bumping the buffer ref count and returning a new struct pointing to the
332// same buffer. However, the `Arc` structure is lazily allocated. This means
333// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
334// function can be called concurrently from multiple threads. This is why an
335// `AtomicPtr` is used for the `arc` field vs. a `*const`.
336//
337// Care is taken to ensure that the need for synchronization is minimized. Most
338// operations do not require any synchronization.
339//
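// For example, this is the kind of usage that must stay sound (illustrative
// user code, not part of this module):
//
//     let shared = std::sync::Arc::new(Bytes::copy_from_slice(b"payload"));
//     let s2 = shared.clone();
//     std::thread::spawn(move || {
//         let _local: Bytes = (*s2).clone(); // may run concurrently with
//                                            // clones on other threads
//     });
//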
340#[cfg(target_endian = "little")]
341#[repr(C)]
342struct Inner {
343    // WARNING: Do not access the fields directly unless you know what you are
344    // doing. Instead, use the fns. See implementation comment above.
345    arc: NonNull<Shared>,
346    ptr: *mut u8,
347    len: usize,
348    cap: usize,
349}
350
351#[cfg(target_endian = "big")]
352#[repr(C)]
353struct Inner {
354    // WARNING: Do not access the fields directly unless you know what you are
355    // doing. Instead, use the fns. See implementation comment above.
356    ptr: *mut u8,
357    len: usize,
358    cap: usize,
359    arc: NonNull<Shared>,
360}
361
362// Thread-safe reference-counted container for the shared storage. This mostly
363// the same as `std::sync::Arc` but without the weak counter. The ref counting
364// fns are based on the ones found in `std`.
365//
366// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
367// up making the overall code simpler and easier to reason about. This is due to
368// some of the logic around setting `Inner::arc` and other ways the `arc` field
369// is used. Using `Arc` ended up requiring a number of funky transmutes and
370// other shenanigans to make it work.
371struct Shared {
372    vec: Vec<u8>,
373    ref_count: AtomicUsize,
374    pool: PoolRef,
375}
376
377struct SharedVec {
378    cap: usize,
379    len: u32,
380    offset: u32,
381    ref_count: AtomicUsize,
382    pool: PoolRef,
383}
384
385// Buffer storage strategy flags.
386const KIND_ARC: usize = 0b00;
387const KIND_INLINE: usize = 0b01;
388const KIND_STATIC: usize = 0b10;
389const KIND_VEC: usize = 0b11;
390const KIND_MASK: usize = 0b11;
391const KIND_UNMASK: usize = !KIND_MASK;
392
393const MIN_NON_ZERO_CAP: usize = 64;
394const SHARED_VEC_SIZE: usize = mem::size_of::<SharedVec>();
395
396// Bit op constants for extracting the inline length value from the `arc` field.
397const INLINE_LEN_MASK: usize = 0b1111_1100;
398const INLINE_LEN_OFFSET: usize = 2;
399
400// Byte offset from the start of `Inner` to where the inline buffer data
401// starts. On little endian platforms, the first byte of the struct is the
402// storage flag, so the data is shifted by a byte. On big endian systems, the
403// data starts at the beginning of the struct.
404#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 1;
406#[cfg(target_endian = "big")]
407const INLINE_DATA_OFFSET: isize = 0;
408
409// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
410// metadata.
411#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 1;
413#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 1;
415
416/*
417 *
418 * ===== Bytes =====
419 *
420 */
421
422impl Bytes {
423    /// Creates a new empty `Bytes`.
424    ///
425    /// This will not allocate and the returned `Bytes` handle will be empty.
426    ///
427    /// # Examples
428    ///
429    /// ```
430    /// use ntex_bytes::Bytes;
431    ///
432    /// let b = Bytes::new();
433    /// assert_eq!(&b[..], b"");
434    /// ```
435    #[inline]
436    pub const fn new() -> Bytes {
437        Bytes {
438            inner: Inner::empty_inline(),
439        }
440    }
441
442    /// Creates a new `Bytes` from a static slice.
443    ///
444    /// The returned `Bytes` will point directly to the static slice. There is
445    /// no allocating or copying.
446    ///
447    /// # Examples
448    ///
449    /// ```
450    /// use ntex_bytes::Bytes;
451    ///
452    /// let b = Bytes::from_static(b"hello");
453    /// assert_eq!(&b[..], b"hello");
454    /// ```
455    #[inline]
456    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
457        Bytes {
458            inner: Inner::from_static(bytes),
459        }
460    }
461
462    /// Returns the number of bytes contained in this `Bytes`.
463    ///
464    /// # Examples
465    ///
466    /// ```
467    /// use ntex_bytes::Bytes;
468    ///
469    /// let b = Bytes::from(&b"hello"[..]);
470    /// assert_eq!(b.len(), 5);
471    /// ```
472    #[inline]
473    pub fn len(&self) -> usize {
474        self.inner.len()
475    }
476
477    /// Returns true if the `Bytes` has a length of 0.
478    ///
479    /// # Examples
480    ///
481    /// ```
482    /// use ntex_bytes::Bytes;
483    ///
484    /// let b = Bytes::new();
485    /// assert!(b.is_empty());
486    /// ```
487    #[inline]
488    pub fn is_empty(&self) -> bool {
489        self.inner.is_empty()
490    }
491
492    /// Return true if the `Bytes` uses inline allocation
493    ///
494    /// # Examples
495    /// ```
496    /// use ntex_bytes::{Bytes, BytesMut};
497    ///
498    /// assert!(Bytes::from(BytesMut::from(&[0, 0, 0, 0][..])).is_inline());
499    /// assert!(Bytes::from(Vec::with_capacity(4)).is_inline());
500    /// assert!(!Bytes::from(&[0; 1024][..]).is_inline());
501    /// ```
502    pub fn is_inline(&self) -> bool {
503        self.inner.is_inline()
504    }
505
506    /// Creates `Bytes` instance from slice, by copying it.
507    pub fn copy_from_slice(data: &[u8]) -> Self {
508        Self::copy_from_slice_in(data, PoolId::DEFAULT)
509    }
510
511    /// Creates `Bytes` instance from slice, by copying it.
512    pub fn copy_from_slice_in<T>(data: &[u8], pool: T) -> Self
513    where
514        PoolRef: From<T>,
515    {
516        if data.len() <= INLINE_CAP {
517            Bytes {
518                inner: Inner::from_slice_inline(data),
519            }
520        } else {
521            Bytes {
522                inner: Inner::from_slice(data.len(), data, pool.into()),
523            }
524        }
525    }
526
527    /// Returns a slice of self for the provided range.
528    ///
529    /// This will increment the reference count for the underlying memory and
530    /// return a new `Bytes` handle set to the slice.
531    ///
532    /// This operation is `O(1)`.
533    ///
534    /// # Examples
535    ///
536    /// ```
537    /// use ntex_bytes::Bytes;
538    ///
539    /// let a = Bytes::from(b"hello world");
540    /// let b = a.slice(2..5);
541    ///
542    /// assert_eq!(&b[..], b"llo");
543    /// assert_eq!(&b[..=1], b"ll");
544    /// assert_eq!(&b[1..=1], b"l");
545    /// ```
546    ///
547    /// # Panics
548    ///
549    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
550    /// will panic.
551    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
552        self.slice_checked(range)
553            .expect("Requires that `begin <= end` and `end <= self.len()`")
554    }
555
556    /// Returns a slice of self for the provided range.
557    ///
    /// Returns `None` if `begin > end` or `end > self.len()`.
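    ///
    /// A brief example of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice_checked(2..5).unwrap();
    ///
    /// assert_eq!(&b[..], b"llo");
    /// assert!(a.slice_checked(6..20).is_none());
    /// ```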
559    pub fn slice_checked(&self, range: impl RangeBounds<usize>) -> Option<Bytes> {
560        use std::ops::Bound;
561
562        let len = self.len();
563
564        let begin = match range.start_bound() {
565            Bound::Included(&n) => n,
566            Bound::Excluded(&n) => n + 1,
567            Bound::Unbounded => 0,
568        };
569
570        let end = match range.end_bound() {
571            Bound::Included(&n) => n + 1,
572            Bound::Excluded(&n) => n,
573            Bound::Unbounded => len,
574        };
575
576        if begin <= end && end <= len {
577            if end - begin <= INLINE_CAP {
578                Some(Bytes {
579                    inner: Inner::from_slice_inline(&self[begin..end]),
580                })
581            } else {
582                let mut ret = self.clone();
583                unsafe {
584                    ret.inner.set_end(end);
585                    ret.inner.set_start(begin);
586                }
587                Some(ret)
588            }
589        } else {
590            None
591        }
592    }
593
594    /// Returns a slice of self that is equivalent to the given `subset`.
595    ///
596    /// When processing a `Bytes` buffer with other tools, one often gets a
597    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
598    /// This function turns that `&[u8]` into another `Bytes`, as if one had
599    /// called `self.slice()` with the offsets that correspond to `subset`.
600    ///
601    /// This operation is `O(1)`.
602    ///
603    /// # Examples
604    ///
605    /// ```
606    /// use ntex_bytes::Bytes;
607    ///
608    /// let bytes = Bytes::from(&b"012345678"[..]);
609    /// let as_slice = bytes.as_ref();
610    /// let subset = &as_slice[2..6];
611    /// let subslice = bytes.slice_ref(&subset);
612    /// assert_eq!(subslice, b"2345");
613    /// ```
614    ///
615    /// # Panics
616    ///
617    /// Requires that the given `sub` slice is in fact contained within the
618    /// `Bytes` buffer; otherwise this function will panic.
619    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
620        self.slice_ref_checked(subset)
621            .expect("Given `sub` slice is not contained within the `Bytes` buffer")
622    }
623
624    /// Returns a slice of self that is equivalent to the given `subset`.
625    pub fn slice_ref_checked(&self, subset: &[u8]) -> Option<Bytes> {
626        let bytes_p = self.as_ptr() as usize;
627        let bytes_len = self.len();
628
629        let sub_p = subset.as_ptr() as usize;
630        let sub_len = subset.len();
631
632        if sub_p >= bytes_p && sub_p + sub_len <= bytes_p + bytes_len {
633            let sub_offset = sub_p - bytes_p;
634            Some(self.slice(sub_offset..(sub_offset + sub_len)))
635        } else {
636            None
637        }
638    }
639
640    /// Splits the bytes into two at the given index.
641    ///
642    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
643    /// contains elements `[at, len)`.
644    ///
645    /// This is an `O(1)` operation that just increases the reference count and
646    /// sets a few indices.
647    ///
648    /// # Examples
649    ///
650    /// ```
651    /// use ntex_bytes::Bytes;
652    ///
653    /// let mut a = Bytes::from(&b"hello world"[..]);
654    /// let b = a.split_off(5);
655    ///
656    /// assert_eq!(a, b"hello");
657    /// assert_eq!(b, b" world");
658    /// ```
659    ///
660    /// # Panics
661    ///
662    /// Panics if `at > self.len()`.
663    pub fn split_off(&mut self, at: usize) -> Bytes {
664        self.split_off_checked(at)
            .expect("at value must be <= self.len()")
666    }
667
668    /// Splits the bytes into two at the given index.
669    ///
    /// Returns `None` if `at > self.len()`.
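    ///
    /// A brief example of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// assert!(a.split_off_checked(42).is_none());
    ///
    /// let b = a.split_off_checked(5).unwrap();
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```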
671    pub fn split_off_checked(&mut self, at: usize) -> Option<Bytes> {
672        if at <= self.len() {
673            if at == self.len() {
674                Some(Bytes::new())
675            } else if at == 0 {
676                Some(mem::replace(self, Bytes::new()))
677            } else {
678                Some(Bytes {
679                    inner: self.inner.split_off(at, true),
680                })
681            }
682        } else {
683            None
684        }
685    }
686
687    /// Splits the bytes into two at the given index.
688    ///
689    /// Afterwards `self` contains elements `[at, len)`, and the returned
690    /// `Bytes` contains elements `[0, at)`.
691    ///
692    /// This is an `O(1)` operation that just increases the reference count and
693    /// sets a few indices.
694    ///
695    /// # Examples
696    ///
697    /// ```
698    /// use ntex_bytes::Bytes;
699    ///
700    /// let mut a = Bytes::from(&b"hello world"[..]);
701    /// let b = a.split_to(5);
702    ///
703    /// assert_eq!(a, b" world");
704    /// assert_eq!(b, b"hello");
705    /// ```
706    ///
707    /// # Panics
708    ///
709    /// Panics if `at > len`.
710    pub fn split_to(&mut self, at: usize) -> Bytes {
711        self.split_to_checked(at)
            .expect("at value must be <= self.len()")
713    }
714
715    /// Splits the bytes into two at the given index.
716    ///
    /// Returns `None` if `at > self.len()`.
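    ///
    /// A brief example of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// assert!(a.split_to_checked(42).is_none());
    ///
    /// let b = a.split_to_checked(5).unwrap();
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```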
718    pub fn split_to_checked(&mut self, at: usize) -> Option<Bytes> {
719        if at <= self.len() {
720            if at == self.len() {
721                Some(mem::replace(self, Bytes::new()))
722            } else if at == 0 {
723                Some(Bytes::new())
724            } else {
725                Some(Bytes {
726                    inner: self.inner.split_to(at, true),
727                })
728            }
729        } else {
730            None
731        }
732    }
733
734    /// Shortens the buffer, keeping the first `len` bytes and dropping the
735    /// rest.
736    ///
737    /// If `len` is greater than the buffer's current length, this has no
738    /// effect.
739    ///
740    /// The [`split_off`] method can emulate `truncate`, but this causes the
741    /// excess bytes to be returned instead of dropped.
742    ///
743    /// # Examples
744    ///
745    /// ```
746    /// use ntex_bytes::Bytes;
747    ///
748    /// let mut buf = Bytes::from(&b"hello world"[..]);
749    /// buf.truncate(5);
750    /// assert_eq!(buf, b"hello"[..]);
751    /// ```
752    ///
753    /// [`split_off`]: #method.split_off
754    #[inline]
755    pub fn truncate(&mut self, len: usize) {
756        self.inner.truncate(len, true);
757    }
758
    /// Reduces memory usage by dropping unused capacity.
    ///
    /// This is useful if the underlying buffer is much larger than the data
    /// currently viewed by this `Bytes` instance.
762    ///
763    /// # Examples
764    ///
765    /// ```
766    /// use ntex_bytes::Bytes;
767    ///
768    /// let mut buf = Bytes::from(&b"hello world"[..]);
769    /// buf.trimdown();
770    /// assert_eq!(buf, b"hello world"[..]);
771    /// ```
772    #[inline]
773    pub fn trimdown(&mut self) {
774        let kind = self.inner.kind();
775
776        // trim down only if buffer is not inline or static and
777        // buffer's unused space is greater than 64 bytes
778        if !(kind == KIND_INLINE || kind == KIND_STATIC) {
779            if self.inner.len() <= INLINE_CAP {
780                *self = Bytes {
781                    inner: Inner::from_slice_inline(self),
782                };
783            } else if self.inner.capacity() - self.inner.len() >= 64 {
784                *self = Bytes {
785                    inner: Inner::from_slice(self.len(), self, self.inner.pool()),
786                }
787            }
788        }
789    }
790
791    /// Clears the buffer, removing all data.
792    ///
793    /// # Examples
794    ///
795    /// ```
796    /// use ntex_bytes::Bytes;
797    ///
798    /// let mut buf = Bytes::from(&b"hello world"[..]);
799    /// buf.clear();
800    /// assert!(buf.is_empty());
801    /// ```
802    #[inline]
803    pub fn clear(&mut self) {
804        self.inner = Inner::empty_inline();
805    }
806
807    /// Attempts to convert into a `BytesMut` handle.
808    ///
809    /// This will only succeed if there are no other outstanding references to
810    /// the underlying chunk of memory. `Bytes` handles that contain inlined
811    /// bytes will always be convertible to `BytesMut`.
812    ///
813    /// # Examples
814    ///
815    /// ```
816    /// use ntex_bytes::Bytes;
817    ///
818    /// let a = Bytes::copy_from_slice(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
819    ///
820    /// // Create a shallow clone
821    /// let b = a.clone();
822    ///
823    /// // This will fail because `b` shares a reference with `a`
824    /// let a = a.try_mut().unwrap_err();
825    ///
826    /// drop(b);
827    ///
828    /// // This will succeed
829    /// let mut a = a.try_mut().unwrap();
830    ///
831    /// a[0] = b'b';
832    ///
833    /// assert_eq!(&a[..4], b"bary");
834    /// ```
835    pub fn try_mut(self) -> Result<BytesMut, Bytes> {
836        if self.inner.is_mut_safe() {
837            Ok(BytesMut { inner: self.inner })
838        } else {
839            Err(self)
840        }
841    }
842
843    /// Returns an iterator over the bytes contained by the buffer.
844    ///
845    /// # Examples
846    ///
847    /// ```
848    /// use ntex_bytes::{Buf, Bytes};
849    ///
850    /// let buf = Bytes::from(&b"abc"[..]);
851    /// let mut iter = buf.iter();
852    ///
853    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
854    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
855    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
856    /// assert_eq!(iter.next(), None);
857    /// ```
858    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
859        self.chunk().iter()
860    }
861}
862
863impl Buf for Bytes {
864    #[inline]
865    fn remaining(&self) -> usize {
866        self.len()
867    }
868
869    #[inline]
870    fn chunk(&self) -> &[u8] {
871        self.inner.as_ref()
872    }
873
874    #[inline]
875    fn advance(&mut self, cnt: usize) {
876        assert!(
877            cnt <= self.inner.as_ref().len(),
878            "cannot advance past `remaining`"
879        );
880        unsafe {
881            self.inner.set_start(cnt);
882        }
883    }
884}
885
886impl bytes::buf::Buf for Bytes {
887    #[inline]
888    fn remaining(&self) -> usize {
889        self.len()
890    }
891
892    #[inline]
893    fn chunk(&self) -> &[u8] {
894        self.inner.as_ref()
895    }
896
897    #[inline]
898    fn advance(&mut self, cnt: usize) {
899        assert!(
900            cnt <= self.inner.as_ref().len(),
901            "cannot advance past `remaining`"
902        );
903        unsafe {
904            self.inner.set_start(cnt);
905        }
906    }
907}
908
909impl Clone for Bytes {
910    fn clone(&self) -> Bytes {
911        Bytes {
912            inner: unsafe { self.inner.shallow_clone() },
913        }
914    }
915}
916
917impl AsRef<[u8]> for Bytes {
918    #[inline]
919    fn as_ref(&self) -> &[u8] {
920        self.inner.as_ref()
921    }
922}
923
924impl Deref for Bytes {
925    type Target = [u8];
926
927    #[inline]
928    fn deref(&self) -> &[u8] {
929        self.inner.as_ref()
930    }
931}
932
933impl From<&Bytes> for Bytes {
934    fn from(src: &Bytes) -> Bytes {
935        src.clone()
936    }
937}
938
939impl From<BytesMut> for Bytes {
940    fn from(src: BytesMut) -> Bytes {
941        src.freeze()
942    }
943}
944
945impl From<Vec<u8>> for Bytes {
946    /// Convert a `Vec` into a `Bytes`
947    ///
    /// If the `Vec` is small enough, the data is inlined into the returned
    /// `Bytes`; otherwise the `Vec`'s allocation is reused without copying.
951    fn from(src: Vec<u8>) -> Bytes {
952        if src.is_empty() {
953            Bytes::new()
954        } else if src.len() <= INLINE_CAP {
955            Bytes {
956                inner: Inner::from_slice_inline(&src),
957            }
958        } else {
959            BytesMut::from(src).freeze()
960        }
961    }
962}
963
964impl From<String> for Bytes {
965    fn from(src: String) -> Bytes {
966        if src.is_empty() {
967            Bytes::new()
968        } else if src.len() <= INLINE_CAP {
969            Bytes {
970                inner: Inner::from_slice_inline(src.as_bytes()),
971            }
972        } else {
973            BytesMut::from(src).freeze()
974        }
975    }
976}
977
978impl From<&'static [u8]> for Bytes {
979    fn from(src: &'static [u8]) -> Bytes {
980        Bytes::from_static(src)
981    }
982}
983
984impl From<&'static str> for Bytes {
985    fn from(src: &'static str) -> Bytes {
986        Bytes::from_static(src.as_bytes())
987    }
988}
989
990impl<'a, const N: usize> From<&'a [u8; N]> for Bytes {
991    fn from(src: &'a [u8; N]) -> Bytes {
992        Bytes::copy_from_slice(src)
993    }
994}
995
996impl FromIterator<u8> for Bytes {
997    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
998        BytesMut::from_iter(into_iter).freeze()
999    }
1000}
1001
1002impl<'a> FromIterator<&'a u8> for Bytes {
1003    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1004        BytesMut::from_iter(into_iter).freeze()
1005    }
1006}
1007
1008impl Eq for Bytes {}
1009
1010impl PartialEq for Bytes {
1011    fn eq(&self, other: &Bytes) -> bool {
1012        self.inner.as_ref() == other.inner.as_ref()
1013    }
1014}
1015
1016impl PartialOrd for Bytes {
1017    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
1018        Some(self.cmp(other))
1019    }
1020}
1021
1022impl Ord for Bytes {
1023    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
1024        self.inner.as_ref().cmp(other.inner.as_ref())
1025    }
1026}
1027
1028impl Default for Bytes {
1029    #[inline]
1030    fn default() -> Bytes {
1031        Bytes::new()
1032    }
1033}
1034
1035impl fmt::Debug for Bytes {
1036    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1037        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1038    }
1039}
1040
1041impl hash::Hash for Bytes {
1042    fn hash<H>(&self, state: &mut H)
1043    where
1044        H: hash::Hasher,
1045    {
1046        let s: &[u8] = self.as_ref();
1047        s.hash(state);
1048    }
1049}
1050
1051impl Borrow<[u8]> for Bytes {
1052    fn borrow(&self) -> &[u8] {
1053        self.as_ref()
1054    }
1055}
1056
1057impl IntoIterator for Bytes {
1058    type Item = u8;
1059    type IntoIter = IntoIter<Bytes>;
1060
1061    fn into_iter(self) -> Self::IntoIter {
1062        IntoIter::new(self)
1063    }
1064}
1065
1066impl<'a> IntoIterator for &'a Bytes {
1067    type Item = &'a u8;
1068    type IntoIter = std::slice::Iter<'a, u8>;
1069
1070    fn into_iter(self) -> Self::IntoIter {
1071        self.as_ref().iter()
1072    }
1073}
1074
1075/*
1076 *
1077 * ===== BytesMut =====
1078 *
1079 */
1080
1081impl BytesMut {
1082    /// Creates a new `BytesMut` with the specified capacity.
1083    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
1087    ///
1088    /// It is important to note that this function does not specify the length
1089    /// of the returned `BytesMut`, but only the capacity.
1090    ///
1091    /// # Panics
1092    ///
    /// Panics if `capacity` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1095    ///
1096    /// # Examples
1097    ///
1098    /// ```
1099    /// use ntex_bytes::{BytesMut, BufMut};
1100    ///
1101    /// let mut bytes = BytesMut::with_capacity(64);
1102    ///
1103    /// // `bytes` contains no data, even though there is capacity
1104    /// assert_eq!(bytes.len(), 0);
1105    ///
1106    /// bytes.put(&b"hello world"[..]);
1107    ///
1108    /// assert_eq!(&bytes[..], b"hello world");
1109    /// ```
1110    #[inline]
1111    pub fn with_capacity(capacity: usize) -> BytesMut {
1112        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1113    }
1114
1115    /// Creates a new `BytesMut` with the specified capacity and in specified memory pool.
1116    ///
1117    /// # Examples
1118    ///
1119    /// ```
1120    /// use ntex_bytes::{BytesMut, BufMut, PoolId};
1121    ///
1122    /// let mut bytes = BytesMut::with_capacity_in(64, PoolId::P1);
1123    ///
1124    /// // `bytes` contains no data, even though there is capacity
1125    /// assert_eq!(bytes.len(), 0);
1126    ///
1127    /// bytes.put(&b"hello world"[..]);
1128    ///
1129    /// assert_eq!(&bytes[..], b"hello world");
1130    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1131    /// ```
1132    #[inline]
1133    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesMut
1134    where
1135        PoolRef: From<T>,
1136    {
1137        BytesMut {
1138            inner: Inner::with_capacity(capacity, pool.into()),
1139        }
1140    }
1141
1142    /// Creates a new `BytesMut` from slice, by copying it.
1143    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
1144        Self::copy_from_slice_in(src, PoolId::DEFAULT)
1145    }
1146
1147    /// Creates a new `BytesMut` from slice, by copying it.
1148    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
1149    where
1150        T: AsRef<[u8]>,
1151        PoolRef: From<U>,
1152    {
1153        let s = src.as_ref();
1154        BytesMut {
1155            inner: Inner::from_slice(s.len(), s, pool.into()),
1156        }
1157    }
1158
1159    #[inline]
1160    /// Convert a `Vec` into a `BytesMut`
1161    pub fn from_vec<T>(src: Vec<u8>, pool: T) -> BytesMut
1162    where
1163        PoolRef: From<T>,
1164    {
1165        BytesMut {
1166            inner: Inner::from_vec(src, pool.into()),
1167        }
1168    }
1169
1170    /// Creates a new `BytesMut` with default capacity.
1171    ///
1172    /// Resulting object has length 0 and unspecified capacity.
1173    /// This function does not allocate.
1174    ///
1175    /// # Examples
1176    ///
1177    /// ```
1178    /// use ntex_bytes::{BytesMut, BufMut};
1179    ///
1180    /// let mut bytes = BytesMut::new();
1181    ///
1182    /// assert_eq!(0, bytes.len());
1183    ///
1184    /// bytes.reserve(2);
1185    /// bytes.put_slice(b"xy");
1186    ///
1187    /// assert_eq!(&b"xy"[..], &bytes[..]);
1188    /// ```
1189    #[inline]
1190    pub fn new() -> BytesMut {
1191        BytesMut::with_capacity(MIN_NON_ZERO_CAP)
1192    }
1193
1194    /// Returns the number of bytes contained in this `BytesMut`.
1195    ///
1196    /// # Examples
1197    ///
1198    /// ```
1199    /// use ntex_bytes::BytesMut;
1200    ///
1201    /// let b = BytesMut::from(&b"hello"[..]);
1202    /// assert_eq!(b.len(), 5);
1203    /// ```
1204    #[inline]
1205    pub fn len(&self) -> usize {
1206        self.inner.len()
1207    }
1208
1209    /// Returns true if the `BytesMut` has a length of 0.
1210    ///
1211    /// # Examples
1212    ///
1213    /// ```
1214    /// use ntex_bytes::BytesMut;
1215    ///
1216    /// let b = BytesMut::with_capacity(64);
1217    /// assert!(b.is_empty());
1218    /// ```
1219    #[inline]
1220    pub fn is_empty(&self) -> bool {
1221        self.inner.is_empty()
1222    }
1223
1224    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
1225    ///
1226    /// # Examples
1227    ///
1228    /// ```
1229    /// use ntex_bytes::BytesMut;
1230    ///
1231    /// let b = BytesMut::with_capacity(64);
1232    /// assert_eq!(b.capacity(), 64);
1233    /// ```
1234    #[inline]
1235    pub fn capacity(&self) -> usize {
1236        self.inner.capacity()
1237    }
1238
1239    /// Converts `self` into an immutable `Bytes`.
1240    ///
1241    /// The conversion is zero cost and is used to indicate that the slice
1242    /// referenced by the handle will no longer be mutated. Once the conversion
1243    /// is done, the handle can be cloned and shared across threads.
1244    ///
1245    /// # Examples
1246    ///
1247    /// ```
1248    /// use ntex_bytes::{BytesMut, BufMut};
1249    /// use std::thread;
1250    ///
1251    /// let mut b = BytesMut::with_capacity(64);
1252    /// b.put("hello world");
1253    /// let b1 = b.freeze();
1254    /// let b2 = b1.clone();
1255    ///
1256    /// let th = thread::spawn(move || {
1257    ///     assert_eq!(b1, b"hello world");
1258    /// });
1259    ///
1260    /// assert_eq!(b2, b"hello world");
1261    /// th.join().unwrap();
1262    /// ```
1263    #[inline]
1264    pub fn freeze(self) -> Bytes {
1265        if self.inner.len() <= INLINE_CAP {
1266            Bytes {
1267                inner: Inner::from_slice_inline(self.inner.as_ref()),
1268            }
1269        } else {
1270            Bytes { inner: self.inner }
1271        }
1272    }
1273
1274    /// Splits the bytes into two at the given index.
1275    ///
1276    /// Afterwards `self` contains elements `[0, at)`, and the returned
1277    /// `BytesMut` contains elements `[at, capacity)`.
1278    ///
1279    /// This is an `O(1)` operation that just increases the reference count
1280    /// and sets a few indices.
1281    ///
1282    /// # Examples
1283    ///
1284    /// ```
1285    /// use ntex_bytes::BytesMut;
1286    ///
1287    /// let mut a = BytesMut::from(&b"hello world"[..]);
1288    /// let mut b = a.split_off(5);
1289    ///
1290    /// a[0] = b'j';
1291    /// b[0] = b'!';
1292    ///
1293    /// assert_eq!(&a[..], b"jello");
1294    /// assert_eq!(&b[..], b"!world");
1295    /// ```
1296    ///
1297    /// # Panics
1298    ///
1299    /// Panics if `at > capacity`.
1300    pub fn split_off(&mut self, at: usize) -> BytesMut {
1301        BytesMut {
1302            inner: self.inner.split_off(at, false),
1303        }
1304    }
1305
1306    /// Removes the bytes from the current view, returning them in a new
1307    /// `BytesMut` handle.
1308    ///
1309    /// Afterwards, `self` will be empty, but will retain any additional
1310    /// capacity that it had before the operation. This is identical to
1311    /// `self.split_to(self.len())`.
1312    ///
1313    /// This is an `O(1)` operation that just increases the reference count and
1314    /// sets a few indices.
1315    ///
1316    /// # Examples
1317    ///
1318    /// ```
1319    /// use ntex_bytes::{BytesMut, BufMut};
1320    ///
1321    /// let mut buf = BytesMut::with_capacity(1024);
1322    /// buf.put(&b"hello world"[..]);
1323    ///
1324    /// let other = buf.split();
1325    ///
1326    /// assert!(buf.is_empty());
1327    /// assert_eq!(1013, buf.capacity());
1328    ///
1329    /// assert_eq!(other, b"hello world"[..]);
1330    /// ```
1331    pub fn split(&mut self) -> BytesMut {
1332        self.split_to(self.len())
1333    }
1334
1335    /// Splits the buffer into two at the given index.
1336    ///
1337    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
1338    /// contains elements `[0, at)`.
1339    ///
1340    /// This is an `O(1)` operation that just increases the reference count and
1341    /// sets a few indices.
1342    ///
1343    /// # Examples
1344    ///
1345    /// ```
1346    /// use ntex_bytes::BytesMut;
1347    ///
1348    /// let mut a = BytesMut::from(&b"hello world"[..]);
1349    /// let mut b = a.split_to(5);
1350    ///
1351    /// a[0] = b'!';
1352    /// b[0] = b'j';
1353    ///
1354    /// assert_eq!(&a[..], b"!world");
1355    /// assert_eq!(&b[..], b"jello");
1356    /// ```
1357    ///
1358    /// # Panics
1359    ///
1360    /// Panics if `at > len`.
1361    pub fn split_to(&mut self, at: usize) -> BytesMut {
1362        self.split_to_checked(at)
            .expect("at value must be <= self.len()")
1364    }
1365
1366    /// Splits the bytes into two at the given index.
1367    ///
    /// Returns `None` if `at > self.len()`.
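    ///
    /// A brief example of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// assert!(a.split_to_checked(42).is_none());
    ///
    /// let b = a.split_to_checked(5).unwrap();
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```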
1369    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
1370        if at <= self.len() {
1371            Some(BytesMut {
1372                inner: self.inner.split_to(at, false),
1373            })
1374        } else {
1375            None
1376        }
1377    }
1378
1379    /// Shortens the buffer, keeping the first `len` bytes and dropping the
1380    /// rest.
1381    ///
1382    /// If `len` is greater than the buffer's current length, this has no
1383    /// effect.
1384    ///
1385    /// The [`split_off`] method can emulate `truncate`, but this causes the
1386    /// excess bytes to be returned instead of dropped.
1387    ///
1388    /// # Examples
1389    ///
1390    /// ```
1391    /// use ntex_bytes::BytesMut;
1392    ///
1393    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1394    /// buf.truncate(5);
1395    /// assert_eq!(buf, b"hello"[..]);
1396    /// ```
1397    ///
1398    /// [`split_off`]: #method.split_off
1399    pub fn truncate(&mut self, len: usize) {
1400        self.inner.truncate(len, false);
1401    }
1402
1403    /// Clears the buffer, removing all data.
1404    ///
1405    /// # Examples
1406    ///
1407    /// ```
1408    /// use ntex_bytes::BytesMut;
1409    ///
1410    /// let mut buf = BytesMut::from(&b"hello world"[..]);
1411    /// buf.clear();
1412    /// assert!(buf.is_empty());
1413    /// ```
1414    pub fn clear(&mut self) {
1415        self.truncate(0);
1416    }
1417
1418    /// Resizes the buffer so that `len` is equal to `new_len`.
1419    ///
1420    /// If `new_len` is greater than `len`, the buffer is extended by the
1421    /// difference with each additional byte set to `value`. If `new_len` is
1422    /// less than `len`, the buffer is simply truncated.
1423    ///
1424    /// # Panics
1425    ///
    /// Panics if `new_len` does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems).
1428    ///
1429    /// # Examples
1430    ///
1431    /// ```
1432    /// use ntex_bytes::BytesMut;
1433    ///
1434    /// let mut buf = BytesMut::new();
1435    ///
1436    /// buf.resize(3, 0x1);
1437    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
1438    ///
1439    /// buf.resize(2, 0x2);
1440    /// assert_eq!(&buf[..], &[0x1, 0x1]);
1441    ///
1442    /// buf.resize(4, 0x3);
1443    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
1444    /// ```
1445    #[inline]
1446    pub fn resize(&mut self, new_len: usize, value: u8) {
1447        self.inner.resize(new_len, value);
1448    }
1449
1450    /// Sets the length of the buffer.
1451    ///
1452    /// This will explicitly set the size of the buffer without actually
1453    /// modifying the data, so it is up to the caller to ensure that the data
1454    /// has been initialized.
1455    ///
1456    /// # Examples
1457    ///
1458    /// ```
1459    /// use ntex_bytes::BytesMut;
1460    ///
1461    /// let mut b = BytesMut::from(&b"hello world"[..]);
1462    ///
1463    /// unsafe {
1464    ///     b.set_len(5);
1465    /// }
1466    ///
1467    /// assert_eq!(&b[..], b"hello");
1468    ///
1469    /// unsafe {
1470    ///     b.set_len(11);
1471    /// }
1472    ///
1473    /// assert_eq!(&b[..], b"hello world");
1474    /// ```
1475    ///
1476    /// # Panics
1477    ///
    /// This method will panic if `len` is out of bounds for the underlying
    /// buffer's capacity.
1480    #[inline]
1481    #[allow(clippy::missing_safety_doc)]
1482    pub unsafe fn set_len(&mut self, len: usize) {
1483        self.inner.set_len(len)
1484    }
1485
1486    /// Reserves capacity for at least `additional` more bytes to be inserted
1487    /// into the given `BytesMut`.
1488    ///
1489    /// More than `additional` bytes may be reserved in order to avoid frequent
1490    /// reallocations. A call to `reserve` may result in an allocation.
1491    ///
1492    /// Before allocating new buffer space, the function will attempt to reclaim
1493    /// space in the existing buffer. If the current handle references a small
1494    /// view in the original buffer and all other handles have been dropped,
1495    /// and the requested capacity is less than or equal to the existing
1496    /// buffer's capacity, then the current view will be copied to the front of
1497    /// the buffer and the handle will take ownership of the full buffer.
1498    ///
1499    /// # Panics
1500    ///
    /// Panics if the new capacity does not fit in 60 bits on 64-bit systems
    /// (28 bits on 32-bit systems), or if it overflows `usize`.
1503    ///
1504    /// # Examples
1505    ///
1506    /// In the following example, a new buffer is allocated.
1507    ///
1508    /// ```
1509    /// use ntex_bytes::BytesMut;
1510    ///
1511    /// let mut buf = BytesMut::from(&b"hello"[..]);
1512    /// buf.reserve(64);
1513    /// assert!(buf.capacity() >= 69);
1514    /// ```
1515    ///
1516    /// In the following example, the existing buffer is reclaimed.
1517    ///
1518    /// ```
1519    /// use ntex_bytes::{BytesMut, BufMut};
1520    ///
1521    /// let mut buf = BytesMut::with_capacity(128);
1522    /// buf.put(&[0; 64][..]);
1523    ///
1524    /// let ptr = buf.as_ptr();
1525    /// let other = buf.split();
1526    ///
1527    /// assert!(buf.is_empty());
1528    /// assert_eq!(buf.capacity(), 64);
1529    ///
1530    /// drop(other);
1531    /// buf.reserve(128);
1532    ///
1533    /// assert_eq!(buf.capacity(), 128);
1534    /// assert_eq!(buf.as_ptr(), ptr);
1535    /// ```
1540    #[inline]
1541    pub fn reserve(&mut self, additional: usize) {
1542        let len = self.len();
1543        let rem = self.capacity() - len;
1544
1545        if additional <= rem {
1546            // The handle can already store at least `additional` more bytes, so
1547            // there is no further work needed to be done.
1548            return;
1549        }
1550
1551        self.inner.reserve_inner(additional);
1552    }
1553
1554    /// Appends given bytes to this object.
1555    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first, so this method never panics due to insufficient capacity.
1558    ///
1559    /// # Examples
1560    ///
1561    /// ```
1562    /// use ntex_bytes::BytesMut;
1563    ///
1564    /// let mut buf = BytesMut::with_capacity(0);
1565    /// buf.extend_from_slice(b"aaabbb");
1566    /// buf.extend_from_slice(b"cccddd");
1567    ///
1568    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
1569    /// ```
1570    #[inline]
1571    pub fn extend_from_slice(&mut self, extend: &[u8]) {
1572        self.put_slice(extend);
1573    }
1574
1575    /// Returns an iterator over the bytes contained by the buffer.
1576    ///
1577    /// # Examples
1578    ///
1579    /// ```
1580    /// use ntex_bytes::{Buf, BytesMut};
1581    ///
1582    /// let buf = BytesMut::from(&b"abc"[..]);
1583    /// let mut iter = buf.iter();
1584    ///
1585    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
1586    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
1587    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
1588    /// assert_eq!(iter.next(), None);
1589    /// ```
1590    #[inline]
1591    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
1592        self.chunk().iter()
1593    }
1594
1595    #[cfg(feature = "mpool")]
1596    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
1597        self.inner.move_to_pool(pool);
1598    }
1599}
1600
1601impl Buf for BytesMut {
1602    #[inline]
1603    fn remaining(&self) -> usize {
1604        self.len()
1605    }
1606
1607    #[inline]
1608    fn chunk(&self) -> &[u8] {
1609        self.inner.as_ref()
1610    }
1611
1612    #[inline]
1613    fn advance(&mut self, cnt: usize) {
1614        assert!(
1615            cnt <= self.inner.as_ref().len(),
1616            "cannot advance past `remaining`"
1617        );
1618        unsafe {
1619            self.inner.set_start(cnt);
1620        }
1621    }
1622}
1623
1624impl BufMut for BytesMut {
1625    #[inline]
1626    fn remaining_mut(&self) -> usize {
1627        self.capacity() - self.len()
1628    }
1629
1630    #[inline]
1631    unsafe fn advance_mut(&mut self, cnt: usize) {
1632        let new_len = self.len() + cnt;
1633
1634        // This call will panic if `cnt` is too big
1635        self.inner.set_len(new_len);
1636    }
1637
1638    #[inline]
1639    fn chunk_mut(&mut self) -> &mut UninitSlice {
1640        let len = self.len();
1641
1642        unsafe {
1643            // This will never panic as `len` can never become invalid
1644            let ptr = &mut self.inner.as_raw()[len..];
1645
1646            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
1647        }
1648    }
1649
1650    #[inline]
1651    fn put_slice(&mut self, src: &[u8]) {
1652        let len = src.len();
1653        self.reserve(len);
1654
1655        unsafe {
1656            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
1657            self.advance_mut(len);
1658        }
1659    }
1660
1661    #[inline]
1662    fn put_u8(&mut self, n: u8) {
1663        self.reserve(1);
1664        self.inner.put_u8(n);
1665    }
1666
1667    #[inline]
1668    fn put_i8(&mut self, n: i8) {
1669        self.reserve(1);
1670        self.put_u8(n as u8);
1671    }
1672}
1673
1674impl bytes::buf::Buf for BytesMut {
1675    #[inline]
1676    fn remaining(&self) -> usize {
1677        self.len()
1678    }
1679
1680    #[inline]
1681    fn chunk(&self) -> &[u8] {
1682        self.inner.as_ref()
1683    }
1684
1685    #[inline]
1686    fn advance(&mut self, cnt: usize) {
1687        Buf::advance(self, cnt)
1688    }
1689}
1690
1691unsafe impl bytes::buf::BufMut for BytesMut {
1692    #[inline]
1693    fn remaining_mut(&self) -> usize {
1694        BufMut::remaining_mut(self)
1695    }
1696
1697    #[inline]
1698    unsafe fn advance_mut(&mut self, cnt: usize) {
1699        BufMut::advance_mut(self, cnt)
1700    }
1701
1702    #[inline]
1703    fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice {
1704        let len = self.len();
1705        unsafe {
1706            // This will never panic as `len` can never become invalid
1707            let ptr = &mut self.inner.as_raw()[len..];
1708            bytes::buf::UninitSlice::from_raw_parts_mut(
1709                ptr.as_mut_ptr(),
1710                self.capacity() - len,
1711            )
1712        }
1713    }
1714
1715    #[inline]
1716    fn put_slice(&mut self, src: &[u8]) {
1717        BufMut::put_slice(self, src)
1718    }
1719
1720    #[inline]
1721    fn put_u8(&mut self, n: u8) {
1722        BufMut::put_u8(self, n)
1723    }
1724
1725    #[inline]
1726    fn put_i8(&mut self, n: i8) {
1727        BufMut::put_i8(self, n)
1728    }
1729}
1730
1731impl AsRef<[u8]> for BytesMut {
1732    #[inline]
1733    fn as_ref(&self) -> &[u8] {
1734        self.inner.as_ref()
1735    }
1736}
1737
1738impl AsMut<[u8]> for BytesMut {
1739    #[inline]
1740    fn as_mut(&mut self) -> &mut [u8] {
1741        self.inner.as_mut()
1742    }
1743}
1744
1745impl Deref for BytesMut {
1746    type Target = [u8];
1747
1748    #[inline]
1749    fn deref(&self) -> &[u8] {
1750        self.as_ref()
1751    }
1752}
1753
1754impl DerefMut for BytesMut {
1755    #[inline]
1756    fn deref_mut(&mut self) -> &mut [u8] {
1757        self.inner.as_mut()
1758    }
1759}
1760
1761impl From<Vec<u8>> for BytesMut {
1762    #[inline]
1763    /// Convert a `Vec` into a `BytesMut`
1764    ///
1765    /// This constructor may be used to avoid the inlining optimization used by
1766    /// `with_capacity`.  A `BytesMut` constructed this way will always store
1767    /// its data on the heap.
1768    fn from(src: Vec<u8>) -> BytesMut {
1769        BytesMut::from_vec(src, PoolId::DEFAULT.pool_ref())
1770    }
1771}
1772
1773impl From<String> for BytesMut {
1774    #[inline]
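    /// Converts a `String` into a `BytesMut` by taking ownership of its bytes.
    ///
    /// A short illustrative example:
    ///
    /// ```
    /// use ntex_bytes::BytesMut;
    ///
    /// let buf = BytesMut::from(String::from("hi"));
    /// assert_eq!(&buf[..], b"hi");
    /// ```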
1775    fn from(src: String) -> BytesMut {
1776        BytesMut::from_vec(src.into_bytes(), PoolId::DEFAULT.pool_ref())
1777    }
1778}
1779
1780impl<'a> From<&'a [u8]> for BytesMut {
1781    fn from(src: &'a [u8]) -> BytesMut {
1782        if src.is_empty() {
1783            BytesMut::new()
1784        } else {
1785            BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1786        }
1787    }
1788}
1789
1790impl<const N: usize> From<[u8; N]> for BytesMut {
1791    fn from(src: [u8; N]) -> BytesMut {
1792        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1793    }
1794}
1795
1796impl<'a, const N: usize> From<&'a [u8; N]> for BytesMut {
1797    fn from(src: &'a [u8; N]) -> BytesMut {
1798        BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
1799    }
1800}
1801
1802impl<'a> From<&'a str> for BytesMut {
1803    #[inline]
1804    fn from(src: &'a str) -> BytesMut {
1805        BytesMut::from(src.as_bytes())
1806    }
1807}
1808
1809impl From<Bytes> for BytesMut {
1810    #[inline]
1811    fn from(src: Bytes) -> BytesMut {
1812        src.try_mut()
1813            .unwrap_or_else(|src| BytesMut::copy_from_slice_in(&src[..], src.inner.pool()))
1814    }
1815}
1816
1817impl Eq for BytesMut {}
1818
1819impl PartialEq for BytesMut {
1820    #[inline]
1821    fn eq(&self, other: &BytesMut) -> bool {
1822        self.inner.as_ref() == other.inner.as_ref()
1823    }
1824}
1825
1826impl Default for BytesMut {
1827    #[inline]
1828    fn default() -> BytesMut {
1829        BytesMut::new()
1830    }
1831}
1832
1833impl Borrow<[u8]> for BytesMut {
1834    #[inline]
1835    fn borrow(&self) -> &[u8] {
1836        self.as_ref()
1837    }
1838}
1839
1840impl BorrowMut<[u8]> for BytesMut {
1841    #[inline]
1842    fn borrow_mut(&mut self) -> &mut [u8] {
1843        self.as_mut()
1844    }
1845}
1846
1847impl fmt::Debug for BytesMut {
1848    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1849        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
1850    }
1851}
1852
1853impl fmt::Write for BytesMut {
1854    #[inline]
1855    fn write_str(&mut self, s: &str) -> fmt::Result {
1856        if self.remaining_mut() >= s.len() {
1857            self.put_slice(s.as_bytes());
1858            Ok(())
1859        } else {
1860            Err(fmt::Error)
1861        }
1862    }
1863
1864    #[inline]
1865    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1866        fmt::write(self, args)
1867    }
1868}
1869
1870impl Clone for BytesMut {
1871    #[inline]
1872    fn clone(&self) -> BytesMut {
1873        BytesMut::from(&self[..])
1874    }
1875}
1876
1877impl IntoIterator for BytesMut {
1878    type Item = u8;
1879    type IntoIter = IntoIter<BytesMut>;
1880
1881    fn into_iter(self) -> Self::IntoIter {
1882        IntoIter::new(self)
1883    }
1884}
1885
1886impl<'a> IntoIterator for &'a BytesMut {
1887    type Item = &'a u8;
1888    type IntoIter = std::slice::Iter<'a, u8>;
1889
1890    fn into_iter(self) -> Self::IntoIter {
1891        self.as_ref().iter()
1892    }
1893}
1894
1895impl FromIterator<u8> for BytesMut {
1896    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1897        let iter = into_iter.into_iter();
1898        let (min, maybe_max) = iter.size_hint();
1899
1900        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
1901        for i in iter {
1902            out.reserve(1);
1903            out.put_u8(i);
1904        }
1905
1906        out
1907    }
1908}
1909
1910impl<'a> FromIterator<&'a u8> for BytesMut {
1911    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1912        into_iter.into_iter().copied().collect::<BytesMut>()
1913    }
1914}
1915
1916impl Extend<u8> for BytesMut {
1917    fn extend<T>(&mut self, iter: T)
1918    where
1919        T: IntoIterator<Item = u8>,
1920    {
1921        let iter = iter.into_iter();
1922
1923        let (lower, _) = iter.size_hint();
1924        self.reserve(lower);
1925
1926        for b in iter {
1927            self.put_u8(b);
1928        }
1929    }
1930}
1931
1932impl<'a> Extend<&'a u8> for BytesMut {
1933    fn extend<T>(&mut self, iter: T)
1934    where
1935        T: IntoIterator<Item = &'a u8>,
1936    {
1937        self.extend(iter.into_iter().copied())
1938    }
1939}
1940
1941/*
1942 *
1943 * ===== BytesVec =====
1944 *
1945 */
1946
1947impl BytesVec {
1948    /// Creates a new `BytesVec` with the specified capacity.
1949    ///
1950    /// The returned `BytesVec` will be able to hold at least `capacity` bytes
1951    /// without reallocating.
1952    ///
1953    /// It is important to note that this function does not specify the length
1954    /// of the returned `BytesVec`, but only the capacity.
1955    ///
1956    /// # Panics
1957    ///
1958    /// Panics if `capacity` does not fit in 60 bits on 64-bit systems
1959    /// (28 bits on 32-bit systems).
1960    ///
1961    /// # Examples
1962    ///
1963    /// ```
1964    /// use ntex_bytes::{BytesVec, BufMut};
1965    ///
1966    /// let mut bytes = BytesVec::with_capacity(64);
1967    ///
1968    /// // `bytes` contains no data, even though there is capacity
1969    /// assert_eq!(bytes.len(), 0);
1970    ///
1971    /// bytes.put(&b"hello world"[..]);
1972    ///
1973    /// assert_eq!(&bytes[..], b"hello world");
1974    /// ```
1975    #[inline]
1976    pub fn with_capacity(capacity: usize) -> BytesVec {
1977        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1978    }
1979
1980    /// Creates a new `BytesVec` with the specified capacity and in specified memory pool.
1981    ///
1982    /// # Examples
1983    ///
1984    /// ```
1985    /// use ntex_bytes::{BytesVec, BufMut, PoolId};
1986    ///
1987    /// let mut bytes = BytesVec::with_capacity_in(64, PoolId::P1);
1988    ///
1989    /// // `bytes` contains no data, even though there is capacity
1990    /// assert_eq!(bytes.len(), 0);
1991    ///
1992    /// bytes.put(&b"hello world"[..]);
1993    ///
1994    /// assert_eq!(&bytes[..], b"hello world");
1995    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1996    /// ```
1997    #[inline]
1998    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesVec
1999    where
2000        PoolRef: From<T>,
2001    {
2002        BytesVec {
2003            inner: InnerVec::with_capacity(capacity, pool.into()),
2004        }
2005    }
2006
2007    /// Creates a new `BytesVec` from a slice, by copying it.
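    ///
    /// # Examples
    ///
    /// A short illustrative example:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// assert_eq!(&buf[..], b"hello");
    /// ```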
2008    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
2009        Self::copy_from_slice_in(src, PoolId::DEFAULT)
2010    }
2011
2012    /// Creates a new `BytesVec` from a slice, by copying it into the specified memory pool.
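    ///
    /// # Examples
    ///
    /// A short illustrative example using an explicit memory pool:
    ///
    /// ```
    /// use ntex_bytes::{BytesVec, PoolId};
    ///
    /// let buf = BytesVec::copy_from_slice_in(b"hello", PoolId::P1);
    /// assert_eq!(&buf[..], b"hello");
    /// ```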
2013    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
2014    where
2015        T: AsRef<[u8]>,
2016        PoolRef: From<U>,
2017    {
2018        let s = src.as_ref();
2019        BytesVec {
2020            inner: InnerVec::from_slice(s.len(), s, pool.into()),
2021        }
2022    }
2023
2024    /// Creates a new `BytesVec` with default capacity.
2025    ///
2026    /// The resulting object has length 0 and a small, unspecified capacity.
2027    /// This constructor allocates that initial buffer.
2028    ///
2029    /// # Examples
2030    ///
2031    /// ```
2032    /// use ntex_bytes::{BytesVec, BufMut};
2033    ///
2034    /// let mut bytes = BytesVec::new();
2035    ///
2036    /// assert_eq!(0, bytes.len());
2037    ///
2038    /// bytes.reserve(2);
2039    /// bytes.put_slice(b"xy");
2040    ///
2041    /// assert_eq!(&b"xy"[..], &bytes[..]);
2042    /// ```
2043    #[inline]
2044    pub fn new() -> BytesVec {
2045        BytesVec::with_capacity(MIN_NON_ZERO_CAP)
2046    }
2047
2048    /// Returns the number of bytes contained in this `BytesVec`.
2049    ///
2050    /// # Examples
2051    ///
2052    /// ```
2053    /// use ntex_bytes::BytesVec;
2054    ///
2055    /// let b = BytesVec::copy_from_slice(&b"hello"[..]);
2056    /// assert_eq!(b.len(), 5);
2057    /// ```
2058    #[inline]
2059    pub fn len(&self) -> usize {
2060        self.inner.len()
2061    }
2062
2063    /// Returns true if the `BytesVec` has a length of 0.
2064    ///
2065    /// # Examples
2066    ///
2067    /// ```
2068    /// use ntex_bytes::BytesVec;
2069    ///
2070    /// let b = BytesVec::with_capacity(64);
2071    /// assert!(b.is_empty());
2072    /// ```
2073    #[inline]
2074    pub fn is_empty(&self) -> bool {
2075        self.inner.len() == 0
2076    }
2077
2078    /// Returns the number of bytes the `BytesVec` can hold without reallocating.
2079    ///
2080    /// # Examples
2081    ///
2082    /// ```
2083    /// use ntex_bytes::BytesVec;
2084    ///
2085    /// let b = BytesVec::with_capacity(64);
2086    /// assert_eq!(b.capacity(), 64);
2087    /// ```
2088    #[inline]
2089    pub fn capacity(&self) -> usize {
2090        self.inner.capacity()
2091    }
2092
2093    /// Converts `self` into an immutable `Bytes`.
2094    ///
2095    /// The conversion is zero cost and is used to indicate that the slice
2096    /// referenced by the handle will no longer be mutated. Once the conversion
2097    /// is done, the handle can be cloned and shared across threads.
2098    ///
2099    /// # Examples
2100    ///
2101    /// ```
2102    /// use ntex_bytes::{BytesVec, BufMut};
2103    /// use std::thread;
2104    ///
2105    /// let mut b = BytesVec::with_capacity(64);
2106    /// b.put("hello world");
2107    /// let b1 = b.freeze();
2108    /// let b2 = b1.clone();
2109    ///
2110    /// let th = thread::spawn(move || {
2111    ///     assert_eq!(b1, b"hello world");
2112    /// });
2113    ///
2114    /// assert_eq!(b2, b"hello world");
2115    /// th.join().unwrap();
2116    /// ```
2117    #[inline]
2118    pub fn freeze(self) -> Bytes {
2119        Bytes {
2120            inner: self.inner.into_inner(),
2121        }
2122    }
2123
2124    /// Removes the bytes from the current view, returning them in a new
2125    /// `BytesMut` instance.
2126    ///
2127    /// Afterwards, `self` will be empty, but will retain any additional
2128    /// capacity that it had before the operation. This is identical to
2129    /// `self.split_to(self.len())`.
2130    ///
2131    /// This is an `O(1)` operation that just increases the reference count and
2132    /// sets a few indices.
2133    ///
2134    /// # Examples
2135    ///
2136    /// ```
2137    /// use ntex_bytes::{BytesVec, BufMut};
2138    ///
2139    /// let mut buf = BytesVec::with_capacity(1024);
2140    /// buf.put(&b"hello world"[..]);
2141    ///
2142    /// let other = buf.split();
2143    ///
2144    /// assert!(buf.is_empty());
2145    /// assert_eq!(1013, buf.capacity());
2146    ///
2147    /// assert_eq!(other, b"hello world"[..]);
2148    /// ```
2149    pub fn split(&mut self) -> BytesMut {
2150        self.split_to(self.len())
2151    }
2152
2153    /// Splits the buffer into two at the given index.
2154    ///
2155    /// Afterwards `self` contains elements `[at, len)`, and the returned
2156    /// `BytesMut` contains elements `[0, at)`.
2157    ///
2158    /// This is an `O(1)` operation that just increases the reference count and
2159    /// sets a few indices.
2160    ///
2161    /// # Examples
2162    ///
2163    /// ```
2164    /// use ntex_bytes::BytesVec;
2165    ///
2166    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
2167    /// let mut b = a.split_to(5);
2168    ///
2169    /// a[0] = b'!';
2170    ///
2171    /// assert_eq!(&a[..], b"!world");
2172    /// assert_eq!(&b[..], b"hello");
2173    /// ```
2174    ///
2175    /// # Panics
2176    ///
2177    /// Panics if `at > len`.
2178    pub fn split_to(&mut self, at: usize) -> BytesMut {
2179        self.split_to_checked(at)
2180            .expect("at value must be <= self.len()")
2181    }
2182
2183    /// Splits the bytes into two at the given index.
2184    ///
2185    /// Returns `None` if `at > len`.
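    ///
    /// # Examples
    ///
    /// A short illustrative sketch of the checked variant:
    ///
    /// ```
    /// use ntex_bytes::BytesVec;
    ///
    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
    /// assert!(a.split_to_checked(42).is_none());
    ///
    /// let b = a.split_to_checked(5).unwrap();
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```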
2186    pub fn split_to_checked(&mut self, at: usize) -> Option<BytesMut> {
2187        if at <= self.len() {
2188            Some(BytesMut {
2189                inner: self.inner.split_to(at, false),
2190            })
2191        } else {
2192            None
2193        }
2194    }
2195
2196    /// Shortens the buffer, keeping the first `len` bytes and dropping the
2197    /// rest.
2198    ///
2199    /// If `len` is greater than the buffer's current length, this has no
2200    /// effect.
2201    ///
2202    /// The [`split_off`] method can emulate `truncate`, but this causes the
2203    /// excess bytes to be returned instead of dropped.
2204    ///
2205    /// # Examples
2206    ///
2207    /// ```
2208    /// use ntex_bytes::BytesVec;
2209    ///
2210    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2211    /// buf.truncate(5);
2212    /// assert_eq!(buf, b"hello"[..]);
2213    /// ```
2214    ///
2215    /// [`split_off`]: #method.split_off
2216    pub fn truncate(&mut self, len: usize) {
2217        self.inner.truncate(len);
2218    }
2219
2220    /// Clears the buffer, removing all data.
2221    ///
2222    /// # Examples
2223    ///
2224    /// ```
2225    /// use ntex_bytes::BytesVec;
2226    ///
2227    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2228    /// buf.clear();
2229    /// assert!(buf.is_empty());
2230    /// ```
2231    pub fn clear(&mut self) {
2232        self.truncate(0);
2233    }
2234
2235    /// Resizes the buffer so that `len` is equal to `new_len`.
2236    ///
2237    /// If `new_len` is greater than `len`, the buffer is extended by the
2238    /// difference with each additional byte set to `value`. If `new_len` is
2239    /// less than `len`, the buffer is simply truncated.
2240    ///
2241    /// # Panics
2242    ///
2243    /// Panics if `new_len` does not fit in 60 bits on 64-bit systems
2244    /// (28 bits on 32-bit systems).
2245    ///
2246    /// # Examples
2247    ///
2248    /// ```
2249    /// use ntex_bytes::BytesVec;
2250    ///
2251    /// let mut buf = BytesVec::new();
2252    ///
2253    /// buf.resize(3, 0x1);
2254    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
2255    ///
2256    /// buf.resize(2, 0x2);
2257    /// assert_eq!(&buf[..], &[0x1, 0x1]);
2258    ///
2259    /// buf.resize(4, 0x3);
2260    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
2261    /// ```
2262    #[inline]
2263    pub fn resize(&mut self, new_len: usize, value: u8) {
2264        self.inner.resize(new_len, value);
2265    }
2266
2267    /// Sets the length of the buffer.
2268    ///
2269    /// This will explicitly set the size of the buffer without actually
2270    /// modifying the data, so it is up to the caller to ensure that the data
2271    /// has been initialized.
2272    ///
2273    /// # Examples
2274    ///
2275    /// ```
2276    /// use ntex_bytes::BytesVec;
2277    ///
2278    /// let mut b = BytesVec::copy_from_slice(&b"hello world"[..]);
2279    ///
2280    /// unsafe {
2281    ///     b.set_len(5);
2282    /// }
2283    ///
2284    /// assert_eq!(&b[..], b"hello");
2285    ///
2286    /// unsafe {
2287    ///     b.set_len(11);
2288    /// }
2289    ///
2290    /// assert_eq!(&b[..], b"hello world");
2291    /// ```
2292    ///
2293    /// # Panics
2294    ///
2295    /// This method will panic if `len` is out of bounds for the underlying
2296    /// slice or if it comes after the `end` of the configured window.
2297    #[inline]
2298    #[allow(clippy::missing_safety_doc)]
2299    pub unsafe fn set_len(&mut self, len: usize) {
2300        self.inner.set_len(len)
2301    }
2302
2303    /// Reserves capacity for at least `additional` more bytes to be inserted
2304    /// into the given `BytesVec`.
2305    ///
2306    /// More than `additional` bytes may be reserved in order to avoid frequent
2307    /// reallocations. A call to `reserve` may result in an allocation.
2308    ///
2309    /// Before allocating new buffer space, the function will attempt to reclaim
2310    /// space in the existing buffer. If the current handle references a small
2311    /// view in the original buffer and all other handles have been dropped,
2312    /// and the requested capacity is less than or equal to the existing
2313    /// buffer's capacity, then the current view will be copied to the front of
2314    /// the buffer and the handle will take ownership of the full buffer.
2315    ///
2316    /// # Panics
2317    ///
2318    /// Panics if the new capacity does not fit in 60 bits on 64-bit systems
2319    /// (28 bits on 32-bit systems), or if it overflows `usize`.
2320    ///
2321    /// # Examples
2322    ///
2323    /// In the following example, a new buffer is allocated.
2324    ///
2325    /// ```
2326    /// use ntex_bytes::BytesVec;
2327    ///
2328    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
2329    /// buf.reserve(64);
2330    /// assert!(buf.capacity() >= 69);
2331    /// ```
2332    ///
2333    /// In the following example, the existing buffer is reclaimed.
2334    ///
2335    /// ```
2336    /// use ntex_bytes::{BytesVec, BufMut};
2337    ///
2338    /// let mut buf = BytesVec::with_capacity(128);
2339    /// buf.put(&[0; 64][..]);
2340    ///
2341    /// let ptr = buf.as_ptr();
2342    /// let other = buf.split();
2343    ///
2344    /// assert!(buf.is_empty());
2345    /// assert_eq!(buf.capacity(), 64);
2346    ///
2347    /// drop(other);
2348    /// buf.reserve(128);
2349    ///
2350    /// assert_eq!(buf.capacity(), 128);
2351    /// assert_eq!(buf.as_ptr(), ptr);
2352    /// ```
2357    #[inline]
2358    pub fn reserve(&mut self, additional: usize) {
2359        let len = self.len();
2360        let rem = self.capacity() - len;
2361
2362        if additional <= rem {
2363            // The handle can already store at least `additional` more bytes, so
2364            // there is no further work needed to be done.
2365            return;
2366        }
2367
2368        self.inner.reserve_inner(additional);
2369    }
2370
2371    /// Appends given bytes to this object.
2372    ///
2373    /// If this `BytesVec` object does not have enough capacity, it is resized
2374    /// first, so this call does not panic due to insufficient capacity.
2375    ///
2376    /// # Examples
2377    ///
2378    /// ```
2379    /// use ntex_bytes::BytesVec;
2380    ///
2381    /// let mut buf = BytesVec::with_capacity(0);
2382    /// buf.extend_from_slice(b"aaabbb");
2383    /// buf.extend_from_slice(b"cccddd");
2384    ///
2385    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
2386    /// ```
2387    #[inline]
2388    pub fn extend_from_slice(&mut self, extend: &[u8]) {
2389        self.put_slice(extend);
2390    }
2391
2392    /// Runs the provided function with a `BytesMut` that contains the current data.
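    ///
    /// # Examples
    ///
    /// A minimal sketch of typical usage; changes made through the `BytesMut`
    /// are reflected in this `BytesVec` afterwards:
    ///
    /// ```
    /// use ntex_bytes::{BytesVec, BufMut};
    ///
    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// let len = buf.with_bytes_mut(|b| {
    ///     b.put_slice(b" world");
    ///     b.len()
    /// });
    ///
    /// assert_eq!(len, 11);
    /// assert_eq!(&buf[..], b"hello world");
    /// ```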
2393    #[inline]
2394    pub fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2395    where
2396        F: FnOnce(&mut BytesMut) -> R,
2397    {
2398        self.inner.with_bytes_mut(f)
2399    }
2400
2401    /// Returns an iterator over the bytes contained by the buffer.
2402    ///
2403    /// # Examples
2404    ///
2405    /// ```
2406    /// use ntex_bytes::{Buf, BytesVec};
2407    ///
2408    /// let buf = BytesVec::copy_from_slice(&b"abc"[..]);
2409    /// let mut iter = buf.iter();
2410    ///
2411    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
2412    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
2413    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
2414    /// assert_eq!(iter.next(), None);
2415    /// ```
2416    #[inline]
2417    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
2418        self.chunk().iter()
2419    }
2420
2421    #[cfg(feature = "mpool")]
2422    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
2423        self.inner.move_to_pool(pool);
2424    }
2425}
2426
2427impl Buf for BytesVec {
2428    #[inline]
2429    fn remaining(&self) -> usize {
2430        self.len()
2431    }
2432
2433    #[inline]
2434    fn chunk(&self) -> &[u8] {
2435        self.inner.as_ref()
2436    }
2437
2438    #[inline]
2439    fn advance(&mut self, cnt: usize) {
2440        assert!(
2441            cnt <= self.inner.as_ref().len(),
2442            "cannot advance past `remaining`"
2443        );
2444        unsafe {
2445            self.inner.set_start(cnt as u32);
2446        }
2447    }
2448}
2449
2450impl BufMut for BytesVec {
2451    #[inline]
2452    fn remaining_mut(&self) -> usize {
2453        self.capacity() - self.len()
2454    }
2455
2456    #[inline]
2457    unsafe fn advance_mut(&mut self, cnt: usize) {
2458        let new_len = self.len() + cnt;
2459
2460        // This call will panic if `cnt` is too big
2461        self.inner.set_len(new_len);
2462    }
2463
2464    #[inline]
2465    fn chunk_mut(&mut self) -> &mut UninitSlice {
2466        let len = self.len();
2467
2468        unsafe {
2469            // This will never panic as `len` can never become invalid
2470            let ptr = &mut self.inner.as_raw()[len..];
2471
2472            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
2473        }
2474    }
2475
2476    #[inline]
2477    fn put_slice(&mut self, src: &[u8]) {
2478        let len = src.len();
2479        self.reserve(len);
2480
2481        unsafe {
2482            ptr::copy_nonoverlapping(src.as_ptr(), self.chunk_mut().as_mut_ptr(), len);
2483            self.advance_mut(len);
2484        }
2485    }
2486
2487    #[inline]
2488    fn put_u8(&mut self, n: u8) {
2489        self.reserve(1);
2490        self.inner.put_u8(n);
2491    }
2492
2493    #[inline]
2494    fn put_i8(&mut self, n: i8) {
2495        self.reserve(1);
2496        self.put_u8(n as u8);
2497    }
2498}
2499
2500impl AsRef<[u8]> for BytesVec {
2501    #[inline]
2502    fn as_ref(&self) -> &[u8] {
2503        self.inner.as_ref()
2504    }
2505}
2506
2507impl AsMut<[u8]> for BytesVec {
2508    #[inline]
2509    fn as_mut(&mut self) -> &mut [u8] {
2510        self.inner.as_mut()
2511    }
2512}
2513
2514impl Deref for BytesVec {
2515    type Target = [u8];
2516
2517    #[inline]
2518    fn deref(&self) -> &[u8] {
2519        self.as_ref()
2520    }
2521}
2522
2523impl DerefMut for BytesVec {
2524    #[inline]
2525    fn deref_mut(&mut self) -> &mut [u8] {
2526        self.inner.as_mut()
2527    }
2528}
2529
2530impl Eq for BytesVec {}
2531
2532impl PartialEq for BytesVec {
2533    #[inline]
2534    fn eq(&self, other: &BytesVec) -> bool {
2535        self.inner.as_ref() == other.inner.as_ref()
2536    }
2537}
2538
2539impl Default for BytesVec {
2540    #[inline]
2541    fn default() -> BytesVec {
2542        BytesVec::new()
2543    }
2544}
2545
2546impl Borrow<[u8]> for BytesVec {
2547    #[inline]
2548    fn borrow(&self) -> &[u8] {
2549        self.as_ref()
2550    }
2551}
2552
2553impl BorrowMut<[u8]> for BytesVec {
2554    #[inline]
2555    fn borrow_mut(&mut self) -> &mut [u8] {
2556        self.as_mut()
2557    }
2558}
2559
2560impl fmt::Debug for BytesVec {
2561    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2562        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
2563    }
2564}
2565
2566impl fmt::Write for BytesVec {
2567    #[inline]
2568    fn write_str(&mut self, s: &str) -> fmt::Result {
2569        if self.remaining_mut() >= s.len() {
2570            self.put_slice(s.as_bytes());
2571            Ok(())
2572        } else {
2573            Err(fmt::Error)
2574        }
2575    }
2576
2577    #[inline]
2578    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
2579        fmt::write(self, args)
2580    }
2581}
2582
2583impl IntoIterator for BytesVec {
2584    type Item = u8;
2585    type IntoIter = IntoIter<BytesVec>;
2586
2587    fn into_iter(self) -> Self::IntoIter {
2588        IntoIter::new(self)
2589    }
2590}
2591
2592impl<'a> IntoIterator for &'a BytesVec {
2593    type Item = &'a u8;
2594    type IntoIter = std::slice::Iter<'a, u8>;
2595
2596    fn into_iter(self) -> Self::IntoIter {
2597        self.as_ref().iter()
2598    }
2599}
2600
2601impl FromIterator<u8> for BytesVec {
2602    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
2603        let iter = into_iter.into_iter();
2604        let (min, maybe_max) = iter.size_hint();
2605
2606        let mut out = BytesVec::with_capacity(maybe_max.unwrap_or(min));
2607        for i in iter {
2608            out.reserve(1);
2609            out.put_u8(i);
2610        }
2611
2612        out
2613    }
2614}
2615
2616impl<'a> FromIterator<&'a u8> for BytesVec {
2617    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
2618        into_iter.into_iter().copied().collect::<BytesVec>()
2619    }
2620}
2621
2622impl Extend<u8> for BytesVec {
2623    fn extend<T>(&mut self, iter: T)
2624    where
2625        T: IntoIterator<Item = u8>,
2626    {
2627        let iter = iter.into_iter();
2628
2629        let (lower, _) = iter.size_hint();
2630        self.reserve(lower);
2631
2632        for b in iter {
2633            self.put_u8(b);
2634        }
2635    }
2636}
2637
2638impl<'a> Extend<&'a u8> for BytesVec {
2639    fn extend<T>(&mut self, iter: T)
2640    where
2641        T: IntoIterator<Item = &'a u8>,
2642    {
2643        self.extend(iter.into_iter().copied())
2644    }
2645}
2646
2647struct InnerVec(NonNull<SharedVec>);
2648
2649impl InnerVec {
2650    #[inline]
2651    fn with_capacity(capacity: usize, pool: PoolRef) -> InnerVec {
2652        Self::from_slice(capacity, &[], pool)
2653    }
2654
2655    #[inline]
2656    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> InnerVec {
2657        // vec must be aligned to SharedVec instead of u8
2658        let vec_cap = if cap % SHARED_VEC_SIZE != 0 {
2659            (cap / SHARED_VEC_SIZE) + 2
2660        } else {
2661            (cap / SHARED_VEC_SIZE) + 1
2662        };
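        // Illustrative arithmetic (assuming, purely for this example, a
        // `SHARED_VEC_SIZE` of 32 bytes): a request of `cap = 100` gives
        // `vec_cap = 100 / 32 + 2 = 5` slots, i.e. 160 bytes. One slot holds
        // the `SharedVec` header, leaving 128 bytes of payload, which still
        // covers the requested 100 bytes even though `cap` is not a multiple
        // of the slot size.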
2663        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
2664        unsafe {
2665            // Store data in vec
2666            let len = src.len() as u32;
2667            let cap = vec.capacity() * SHARED_VEC_SIZE;
2668            let shared_ptr = vec.as_mut_ptr();
2669            mem::forget(vec);
2670            pool.acquire(cap);
2671
2672            let ptr = shared_ptr.add(1) as *mut u8;
2673            if !src.is_empty() {
2674                ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
2675            }
2676            ptr::write(
2677                shared_ptr,
2678                SharedVec {
2679                    len,
2680                    cap,
2681                    pool,
2682                    ref_count: AtomicUsize::new(1),
2683                    offset: SHARED_VEC_SIZE as u32,
2684                },
2685            );
2686
2687            InnerVec(NonNull::new_unchecked(shared_ptr))
2688        }
2689    }
2690
2691    #[cfg(feature = "mpool")]
2692    #[inline]
2693    fn move_to_pool(&mut self, pool: PoolRef) {
2694        unsafe {
2695            let inner = self.as_inner();
2696            if pool != inner.pool {
2697                pool.acquire(inner.cap);
2698                let pool = mem::replace(&mut inner.pool, pool);
2699                pool.release(inner.cap);
2700            }
2701        }
2702    }
2703
2704    /// Return a slice for the handle's view into the shared buffer
2705    #[inline]
2706    fn as_ref(&self) -> &[u8] {
2707        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
2708    }
2709
2710    /// Return a mutable slice for the handle's view into the shared buffer
2711    #[inline]
2712    fn as_mut(&mut self) -> &mut [u8] {
2713        unsafe { slice::from_raw_parts_mut(self.as_ptr(), self.len()) }
2714    }
2715
2716    /// Return a mutable slice for the handle's view into the shared buffer
2717    /// including potentially uninitialized bytes.
2718    #[inline]
2719    unsafe fn as_raw(&mut self) -> &mut [u8] {
2720        slice::from_raw_parts_mut(self.as_ptr(), self.capacity())
2721    }
2722
2723    /// Return a raw pointer to data
2724    #[inline]
2725    unsafe fn as_ptr(&self) -> *mut u8 {
2726        (self.0.as_ptr() as *mut u8).add((*self.0.as_ptr()).offset as usize)
2727    }
2728
2729    #[inline]
2730    unsafe fn as_inner(&mut self) -> &mut SharedVec {
2731        self.0.as_mut()
2732    }
2733
2734    /// Insert a byte into the next slot and advance the len by 1.
2735    #[inline]
2736    fn put_u8(&mut self, n: u8) {
2737        unsafe {
2738            let inner = self.as_inner();
2739            let len = inner.len as usize;
2740            assert!(len < (inner.cap - inner.offset as usize));
2741            inner.len += 1;
2742            *self.as_ptr().add(len) = n;
2743        }
2744    }
2745
2746    #[inline]
2747    fn len(&self) -> usize {
2748        unsafe { (*self.0.as_ptr()).len as usize }
2749    }
2750
2751    /// Sets the length of the handle's view into the shared slice.
2752    #[inline]
2753    unsafe fn set_len(&mut self, len: usize) {
2754        let inner = self.as_inner();
2755        assert!(len <= (inner.cap - inner.offset as usize) && len < u32::MAX as usize);
2756        inner.len = len as u32;
2757    }
2758
2759    #[inline]
2760    fn capacity(&self) -> usize {
2761        unsafe { (*self.0.as_ptr()).cap - (*self.0.as_ptr()).offset as usize }
2762    }
2763
2764    fn into_inner(mut self) -> Inner {
2765        unsafe {
2766            let ptr = self.as_ptr();
2767
2768            if self.len() <= INLINE_CAP {
2769                Inner::from_ptr_inline(ptr, self.len())
2770            } else {
2771                let inner = self.as_inner();
2772
2773                let inner = Inner {
2774                    ptr,
2775                    len: inner.len as usize,
2776                    cap: inner.cap - inner.offset as usize,
2777                    arc: NonNull::new_unchecked(
2778                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2779                    ),
2780                };
2781                mem::forget(self);
2782                inner
2783            }
2784        }
2785    }
2786
2787    fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2788    where
2789        F: FnOnce(&mut BytesMut) -> R,
2790    {
2791        unsafe {
2792            // create Inner for BytesMut
2793            let ptr = self.as_ptr();
2794            let inner = self.as_inner();
2795            let inner = Inner {
2796                ptr,
2797                len: inner.len as usize,
2798                cap: inner.cap - inner.offset as usize,
2799                arc: NonNull::new_unchecked(
2800                    (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2801                ),
2802            };
2803
2804            // run function
2805            let mut buf = BytesMut { inner };
2806            let result = f(&mut buf);
2807
2808            // convert BytesMut back to InnerVec
2809            let kind = buf.inner.kind();
2810            let new_inner =
2811                // only KIND_VEC can be reused as an InnerVec; otherwise the data has to be copied
2812                if kind == KIND_INLINE || kind == KIND_STATIC || kind == KIND_ARC {
2813                    InnerVec::from_slice(
2814                        buf.inner.capacity(),
2815                        buf.inner.as_ref(),
2816                        buf.inner.pool(),
2817                    )
2818                } else if kind == KIND_VEC {
2819                    let ptr = buf.inner.shared_vec();
2820                    let offset = buf.inner.ptr as usize - ptr as usize;
2821
2822                    // the shared vec cannot be reused if the BytesMut points into the middle of it
2823                    if buf.inner.cap < (*ptr).cap - offset {
2824                        InnerVec::from_slice(
2825                            buf.inner.capacity(),
2826                            buf.inner.as_ref(),
2827                            buf.inner.pool(),
2828                        )
2829                    } else {
2830                        // BytesMut owns rest of the vec, so re-use
2831                        (*ptr).len = buf.len() as u32;
2832                        (*ptr).offset = offset as u32;
2833                        let inner = InnerVec(NonNull::new_unchecked(ptr));
2834                        mem::forget(buf); // reuse bytes
2835                        inner
2836                    }
2837                } else {
2838                    panic!()
2839                };
2840
2841            // Swap in the new inner; the old one is forgotten rather than dropped, since its reference to the shared storage was handed off above.
2842            let old = mem::replace(self, new_inner);
2843            mem::forget(old);
2844
2845            result
2846        }
2847    }
2848
2849    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
2850        unsafe {
2851            let ptr = self.as_ptr();
2852
2853            let other = if create_inline && at <= INLINE_CAP {
2854                Inner::from_ptr_inline(ptr, at)
2855            } else {
2856                let inner = self.as_inner();
2857                let old_size = inner.ref_count.fetch_add(1, Relaxed);
2858                if old_size == usize::MAX {
2859                    abort();
2860                }
2861
2862                Inner {
2863                    ptr,
2864                    len: at,
2865                    cap: at,
2866                    arc: NonNull::new_unchecked(
2867                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2868                    ),
2869                }
2870            };
2871            self.set_start(at as u32);
2872
2873            other
2874        }
2875    }
2876
2877    fn truncate(&mut self, len: usize) {
2878        unsafe {
2879            // try to reclaim the buffer. This is possible if the current
2880            // handle is the only outstanding handle pointing to the buffer.
2881            if len == 0 {
2882                let inner = self.as_inner();
2883                if inner.is_unique() && inner.offset != SHARED_VEC_SIZE as u32 {
2884                    inner.offset = SHARED_VEC_SIZE as u32;
2885                }
2886            }
2887
2888            if len <= self.len() {
2889                self.set_len(len);
2890            }
2891        }
2892    }
2893
2894    fn resize(&mut self, new_len: usize, value: u8) {
2895        let len = self.len();
2896        if new_len > len {
2897            let additional = new_len - len;
2898            self.reserve(additional);
2899            unsafe {
2900                let dst = self.as_raw()[len..].as_mut_ptr();
2901                ptr::write_bytes(dst, value, additional);
2902                self.set_len(new_len);
2903            }
2904        } else {
2905            self.truncate(new_len);
2906        }
2907    }
2908
2909    #[inline]
2910    fn reserve(&mut self, additional: usize) {
2911        let len = self.len();
2912        let rem = self.capacity() - len;
2913
2914        if additional <= rem {
2915            // The handle can already store at least `additional` more bytes, so
2916            // there is no further work needed to be done.
2917            return;
2918        }
2919
2920        self.reserve_inner(additional)
2921    }
2922
2923    #[inline]
2924    // In a separate function to allow the short-circuits in `reserve` to
2925    // be inline-able. This significantly helps performance.
2926    fn reserve_inner(&mut self, additional: usize) {
2927        let len = self.len();
2928
2929        // Reserving involves abandoning the currently shared buffer and
2930        // allocating a new vector with the requested capacity.
2931        let new_cap = len + additional;
2932
2933        unsafe {
2934            let inner = self.as_inner();
2935            let vec_cap = inner.cap - SHARED_VEC_SIZE;
2936
2937            // try to reclaim the buffer. This is possible if the current
2938            // handle is the only outstanding handle pointing to the buffer.
2939            if inner.is_unique() && vec_cap >= new_cap {
2940                let offset = inner.offset;
2941                inner.offset = SHARED_VEC_SIZE as u32;
2942
2943                // The capacity is sufficient, reclaim the buffer
2944                let src = (self.0.as_ptr() as *mut u8).add(offset as usize);
2945                let dst = (self.0.as_ptr() as *mut u8).add(SHARED_VEC_SIZE);
2946                ptr::copy(src, dst, len);
2947            } else {
2948                // Create a new vector storage
2949                let pool = inner.pool;
2950                *self = InnerVec::from_slice(new_cap, self.as_ref(), pool);
2951            }
2952        }
2953    }
2954
2955    unsafe fn set_start(&mut self, start: u32) {
2956        // Setting the start to 0 is a no-op, so return early if this is the
2957        // case.
2958        if start == 0 {
2959            return;
2960        }
2961
2962        let inner = self.as_inner();
2963        assert!(start <= inner.cap as u32);
2964
2965        // Updating the start of the view is setting `offset` to point to the
2966        // new start and updating the `len` field to reflect the new length
2967        // of the view.
2968        inner.offset += start;
2969
2970        if inner.len >= start {
2971            inner.len -= start;
2972        } else {
2973            inner.len = 0;
2974        }
2975    }
2976}
2977
2978impl Drop for InnerVec {
2979    fn drop(&mut self) {
2980        release_shared_vec(self.0.as_ptr());
2981    }
2982}
2983
2984/*
2985 *
2986 * ===== Inner =====
2987 *
2988 */
2989
2990impl Inner {
2991    #[inline]
2992    const fn from_static(bytes: &'static [u8]) -> Inner {
2993        let ptr = bytes.as_ptr() as *mut u8;
2994
2995        Inner {
2996            // `arc` won't ever store a pointer. Instead, use it to
2997            // track the fact that the `Bytes` handle is backed by a
2998            // static buffer.
2999            arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
3000            ptr,
3001            len: bytes.len(),
3002            cap: bytes.len(),
3003        }
3004    }
3005
3006    #[inline]
3007    const fn empty_inline() -> Inner {
3008        Inner {
3009            arc: unsafe { NonNull::new_unchecked(KIND_INLINE as *mut Shared) },
3010            ptr: 0 as *mut u8,
3011            len: 0,
3012            cap: 0,
3013        }
3014    }
3015
3016    #[inline]
3017    fn from_vec(mut vec: Vec<u8>, pool: PoolRef) -> Inner {
3018        let len = vec.len();
3019        let cap = vec.capacity();
3020        let ptr = vec.as_mut_ptr();
3021        pool.acquire(cap);
3022
3023        // Store data in arc
3024        let shared = Box::into_raw(Box::new(Shared {
3025            vec,
3026            pool,
3027            ref_count: AtomicUsize::new(1),
3028        }));
3029
3030        // The pointer should be aligned, so this assert should always succeed.
3031        debug_assert!(0 == (shared as usize & KIND_MASK));
3032
3033        // Create new arc, so atomic operations can be avoided.
3034        Inner {
3035            ptr,
3036            len,
3037            cap,
3038            arc: unsafe { NonNull::new_unchecked(shared) },
3039        }
3040    }
3041
3042    #[inline]
3043    fn with_capacity(capacity: usize, pool: PoolRef) -> Inner {
3044        Inner::from_slice(capacity, &[], pool)
3045    }
3046
3047    #[inline]
3048    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> Inner {
3049        // vec must be aligned to SharedVec instead of u8
3050        let mut vec_cap = (cap / SHARED_VEC_SIZE) + 1;
3051        if cap % SHARED_VEC_SIZE != 0 {
3052            vec_cap += 1;
3053        }
3054        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
3055
3056        // Store data in vec
3057        let len = src.len();
3058        let full_cap = vec.capacity() * SHARED_VEC_SIZE;
3059        let cap = full_cap - SHARED_VEC_SIZE;
3060        vec.push(SharedVec {
3061            pool,
3062            cap: full_cap,
3063            ref_count: AtomicUsize::new(1),
3064            len: 0,
3065            offset: 0,
3066        });
3067        pool.acquire(full_cap);
3068
3069        let shared_ptr = vec.as_mut_ptr();
3070        mem::forget(vec);
3071
3072        let (ptr, arc) = unsafe {
3073            let ptr = shared_ptr.add(1) as *mut u8;
3074            ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
3075            let arc =
3076                NonNull::new_unchecked((shared_ptr as usize ^ KIND_VEC) as *mut Shared);
3077            (ptr, arc)
3078        };
3079
3080        // Create new arc, so atomic operations can be avoided.
3081        Inner { len, cap, ptr, arc }
3082    }
3083
3084    #[inline]
3085    fn from_slice_inline(src: &[u8]) -> Inner {
3086        unsafe { Inner::from_ptr_inline(src.as_ptr(), src.len()) }
3087    }
3088
3089    #[inline]
3090    unsafe fn from_ptr_inline(src: *const u8, len: usize) -> Inner {
3091        let mut inner = Inner {
3092            arc: NonNull::new_unchecked(KIND_INLINE as *mut Shared),
3093            ptr: ptr::null_mut(),
3094            len: 0,
3095            cap: 0,
3096        };
3097
3098        let dst = inner.inline_ptr();
3099        ptr::copy(src, dst, len);
3100        inner.set_inline_len(len);
3101        inner
3102    }
3103
3104    #[inline]
3105    fn pool(&self) -> PoolRef {
3106        let kind = self.kind();
3107
3108        if kind == KIND_VEC {
3109            unsafe { (*self.shared_vec()).pool }
3110        } else if kind == KIND_ARC {
3111            unsafe { (*self.arc.as_ptr()).pool }
3112        } else {
3113            PoolId::DEFAULT.pool_ref()
3114        }
3115    }
3116
3117    #[cfg(feature = "mpool")]
3118    #[inline]
3119    fn move_to_pool(&mut self, pool: PoolRef) {
3120        let kind = self.kind();
3121
3122        if kind == KIND_VEC {
3123            let vec = self.shared_vec();
3124            unsafe {
3125                let cap = (*vec).cap;
3126                pool.acquire(cap);
3127                let pool = mem::replace(&mut (*vec).pool, pool);
3128                pool.release(cap);
3129            }
3130        } else if kind == KIND_ARC {
3131            let arc = self.arc.as_ptr();
3132            unsafe {
3133                let cap = (*arc).vec.capacity();
3134                pool.acquire(cap);
3135                let pool = mem::replace(&mut (*arc).pool, pool);
3136                pool.release(cap);
3137            }
3138        }
3139    }
3140
3141    /// Return a slice for the handle's view into the shared buffer
3142    #[inline]
3143    fn as_ref(&self) -> &[u8] {
3144        unsafe {
3145            if self.is_inline() {
3146                slice::from_raw_parts(self.inline_ptr_ro(), self.inline_len())
3147            } else {
3148                slice::from_raw_parts(self.ptr, self.len)
3149            }
3150        }
3151    }
3152
3153    /// Return a mutable slice for the handle's view into the shared buffer
3154    #[inline]
3155    fn as_mut(&mut self) -> &mut [u8] {
3156        debug_assert!(!self.is_static());
3157
3158        unsafe {
3159            if self.is_inline() {
3160                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
3161            } else {
3162                slice::from_raw_parts_mut(self.ptr, self.len)
3163            }
3164        }
3165    }
3166
3167    /// Return a mutable slice for the handle's view into the shared buffer
3168    /// including potentially uninitialized bytes.
3169    #[inline]
3170    unsafe fn as_raw(&mut self) -> &mut [u8] {
3171        debug_assert!(!self.is_static());
3172
3173        if self.is_inline() {
3174            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
3175        } else {
3176            slice::from_raw_parts_mut(self.ptr, self.cap)
3177        }
3178    }
3179
3180    /// Return a raw pointer to data
3181    #[inline]
3182    unsafe fn as_ptr(&mut self) -> *mut u8 {
3183        if self.is_inline() {
3184            self.inline_ptr()
3185        } else {
3186            self.ptr
3187        }
3188    }
3189
3190    /// Insert a byte into the next slot and advance the len by 1.
3191    #[inline]
3192    fn put_u8(&mut self, n: u8) {
3193        if self.is_inline() {
3194            let len = self.inline_len();
3195            assert!(len < INLINE_CAP);
3196            unsafe {
3197                *self.inline_ptr().add(len) = n;
3198            }
3199            self.set_inline_len(len + 1);
3200        } else {
3201            assert!(self.len < self.cap);
3202            unsafe {
3203                *self.ptr.add(self.len) = n;
3204            }
3205            self.len += 1;
3206        }
3207    }
3208
3209    #[inline]
3210    fn len(&self) -> usize {
3211        if self.is_inline() {
3212            self.inline_len()
3213        } else {
3214            self.len
3215        }
3216    }
3217
3218    /// Pointer to the start of the inline buffer
3219    #[inline]
3220    unsafe fn inline_ptr(&mut self) -> *mut u8 {
3221        (self as *mut Inner as *mut u8).offset(INLINE_DATA_OFFSET)
3222    }
3223
3224    /// Pointer to the start of the inline buffer
3225    #[inline]
3226    unsafe fn inline_ptr_ro(&self) -> *const u8 {
3227        (self as *const Inner as *const u8).offset(INLINE_DATA_OFFSET)
3228    }
3229
3230    #[inline]
3231    fn inline_len(&self) -> usize {
3232        // This is undefined behavior due to a data race, but experimental
3233        // evidence shows that it works in practice (discussion:
3234        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3235        (self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
3236    }
3237
3238    /// Set the length of the inline buffer. This is done by writing to the
3239    /// least significant byte of the `arc` field.
3240    #[inline]
3241    fn set_inline_len(&mut self, len: usize) {
3242        debug_assert!(len <= INLINE_CAP);
3243        self.arc = unsafe {
3244            NonNull::new_unchecked(
3245                ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
3246                    | (len << INLINE_LEN_OFFSET)) as _,
3247            )
3248        };
3249    }
3250
3251    /// Sets the length of the handle's view into the shared slice.
3252    #[inline]
3253    unsafe fn set_len(&mut self, len: usize) {
3254        if self.is_inline() {
3255            assert!(len <= INLINE_CAP);
3256            self.set_inline_len(len);
3257        } else {
3258            assert!(len <= self.cap);
3259            self.len = len;
3260        }
3261    }
3262
3263    #[inline]
3264    fn is_empty(&self) -> bool {
3265        self.len() == 0
3266    }
3267
3268    #[inline]
3269    fn capacity(&self) -> usize {
3270        if self.is_inline() {
3271            INLINE_CAP
3272        } else {
3273            self.cap
3274        }
3275    }
3276
3277    fn split_off(&mut self, at: usize, create_inline: bool) -> Inner {
3278        let other = unsafe {
3279            if create_inline && self.len() - at <= INLINE_CAP {
3280                Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at)
3281            } else {
3282                let mut other = self.shallow_clone();
3283                other.set_start(at);
3284                other
3285            }
3286        };
3287        unsafe {
3288            if create_inline && at <= INLINE_CAP {
3289                *self = Inner::from_ptr_inline(self.as_ptr(), at);
3290            } else {
3291                self.set_end(at);
3292            }
3293        }
3294
3295        other
3296    }
3297
3298    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
3299        let other = unsafe {
3300            if create_inline && at <= INLINE_CAP {
3301                Inner::from_ptr_inline(self.as_ptr(), at)
3302            } else {
3303                let mut other = self.shallow_clone();
3304                other.set_end(at);
3305                other
3306            }
3307        };
3308        unsafe {
3309            if create_inline && self.len() - at <= INLINE_CAP {
3310                *self = Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at);
3311            } else {
3312                self.set_start(at);
3313            }
3314        }
3315
3316        other
3317    }
3318
3319    fn truncate(&mut self, len: usize, create_inline: bool) {
3320        unsafe {
3321            if len <= self.len() {
3322                if create_inline && len < INLINE_CAP {
3323                    *self = Inner::from_ptr_inline(self.as_ptr(), len);
3324                } else {
3325                    self.set_len(len);
3326                }
3327            }
3328        }
3329    }
3330
3331    fn resize(&mut self, new_len: usize, value: u8) {
3332        let len = self.len();
3333        if new_len > len {
3334            let additional = new_len - len;
3335            self.reserve(additional);
3336            unsafe {
3337                let dst = self.as_raw()[len..].as_mut_ptr();
3338                ptr::write_bytes(dst, value, additional);
3339                self.set_len(new_len);
3340            }
3341        } else {
3342            self.truncate(new_len, false);
3343        }
3344    }
3345
3346    unsafe fn set_start(&mut self, start: usize) {
3347        // Setting the start to 0 is a no-op, so return early if this is the
3348        // case.
3349        if start == 0 {
3350            return;
3351        }
3352
3353        let kind = self.kind();
3354
3355        // Always check `inline` first, because if the handle is using inline
3356        // data storage, all of the `Inner` struct fields will be gibberish.
3357        if kind == KIND_INLINE {
3358            assert!(start <= INLINE_CAP);
3359
3360            let len = self.inline_len();
3361            if len <= start {
3362                self.set_inline_len(0);
3363            } else {
3364                // `set_start` is essentially shifting data off the front of the
3365                // view. Inlined buffers only track the length of the slice.
3366                // So, to update the start, the data at the new starting point
3367                // is copied to the beginning of the buffer.
3368                let new_len = len - start;
3369
3370                let dst = self.inline_ptr();
3371                let src = (dst as *const u8).add(start);
3372
3373                ptr::copy(src, dst, new_len);
3374
3375                self.set_inline_len(new_len);
3376            }
3377        } else {
3378            assert!(start <= self.cap);
3379
3380            // Updating the start of the view is setting `ptr` to point to the
3381            // new start and updating the `len` field to reflect the new length
3382            // of the view.
3383            self.ptr = self.ptr.add(start);
3384
3385            if self.len >= start {
3386                self.len -= start;
3387            } else {
3388                self.len = 0;
3389            }
3390
3391            self.cap -= start;
3392        }
3393    }
3394
3395    unsafe fn set_end(&mut self, end: usize) {
3396        // Always check `inline` first, because if the handle is using inline
3397        // data storage, all of the `Inner` struct fields will be gibberish.
3398        if self.is_inline() {
3399            assert!(end <= INLINE_CAP);
3400            let new_len = cmp::min(self.inline_len(), end);
3401            self.set_inline_len(new_len);
3402        } else {
3403            assert!(end <= self.cap);
3404
3405            self.cap = end;
3406            self.len = cmp::min(self.len, end);
3407        }
3408    }
3409
3410    /// Checks if it is safe to mutate the memory
3411    fn is_mut_safe(&self) -> bool {
3412        let kind = self.kind();
3413
3414        // Always check `inline` first, because if the handle is using inline
3415        // data storage, all of the `Inner` struct fields will be gibberish.
3416        if kind == KIND_INLINE {
3417            // Inlined buffers can always be mutated as the data is never shared
3418            // across handles.
3419            true
3420        } else if kind == KIND_STATIC {
3421            false
3422        } else if kind == KIND_VEC {
3423            // Otherwise, the underlying buffer is potentially shared with other
3424            // handles, so the ref_count needs to be checked.
3425            unsafe { (*self.shared_vec()).is_unique() }
3426        } else {
3427            // Otherwise, the underlying buffer is potentially shared with other
3428            // handles, so the ref_count needs to be checked.
3429            unsafe { (*self.arc.as_ptr()).is_unique() }
3430        }
3431    }
3432
3433    /// Increments the ref count. This should only be done if it is known that
3434    /// it can be done safely. As such, this fn is not public, instead other
3435    /// fns will use this one while maintaining the guarantees.
3438    ///
3439    /// "Safely" is defined as not exposing two `BytesMut` values that point to
3440    /// the same byte window.
3441    ///
3442    /// This function is thread safe.
3443    unsafe fn shallow_clone(&self) -> Inner {
3444        // Always check `inline` first, because if the handle is using inline
3445        // data storage, all of the `Inner` struct fields will be gibberish.
3446        //
3447        // Additionally, if kind is STATIC, then Arc is *never* changed, making
3448        // it safe and faster to check for it now before an atomic acquire.
3449
3450        if self.is_inline_or_static() {
3451            // In this case, a shallow_clone still involves copying the data.
3452            let mut inner: mem::MaybeUninit<Inner> = mem::MaybeUninit::uninit();
3453            ptr::copy_nonoverlapping(self, inner.as_mut_ptr(), 1);
3454            inner.assume_init()
3455        } else {
3456            self.shallow_clone_sync()
3457        }
3458    }
3459
3460    #[cold]
3461    unsafe fn shallow_clone_sync(&self) -> Inner {
3462        // The function requires `&self`, which means that `shallow_clone`
3463        // could be called concurrently.
3464        //
3465        // The first step is to load the value of `arc`. This determines how to
3466        // proceed: the low bits of the pointer encode the kind of storage, and
3467        // for the shared kinds the reference count is then incremented.
3470        let arc: *mut Shared = self.arc.as_ptr();
3471        let kind = arc as usize & KIND_MASK;
3472
3473        if kind == KIND_ARC {
3474            let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
3475            if old_size == usize::MAX {
3476                abort();
3477            }
3478
3479            Inner {
3480                arc: NonNull::new_unchecked(arc),
3481                ..*self
3482            }
3483        } else {
3484            assert!(kind == KIND_VEC);
3485
3486            let vec_arc = (arc as usize & KIND_UNMASK) as *mut SharedVec;
3487            let old_size = (*vec_arc).ref_count.fetch_add(1, Relaxed);
3488            if old_size == usize::MAX {
3489                abort();
3490            }
3491
3492            Inner {
3493                arc: NonNull::new_unchecked(arc),
3494                ..*self
3495            }
3496        }
3497    }
3498
3499    #[inline]
3500    fn reserve(&mut self, additional: usize) {
3501        let len = self.len();
3502        let rem = self.capacity() - len;
3503
3504        if additional <= rem {
3505            // The handle can already store at least `additional` more bytes, so
3506            // there is no further work needed to be done.
3507            return;
3508        }
3509
3510        self.reserve_inner(additional)
3511    }
3512
3513    #[inline]
3514    // In a separate function to allow the short-circuits in `reserve` to
3515    // be inline-able. This significantly helps performance.
3516    fn reserve_inner(&mut self, additional: usize) {
3517        let len = self.len();
3518        let kind = self.kind();
3519
3520        // Always check `inline` first, because if the handle is using inline
3521        // data storage, all of the `Inner` struct fields will be gibberish.
3522        if kind == KIND_INLINE {
3523            let new_cap = len + additional;
3524
3525            // Promote to a vector
3526            *self = Inner::from_slice(new_cap, self.as_ref(), PoolId::DEFAULT.pool_ref());
3527            return;
3528        }
3529
3530        // Reserving involves abandoning the currently shared buffer and
3531        // allocating a new vector with the requested capacity.
3532        let new_cap = len + additional;
3533
3534        if kind == KIND_VEC {
3535            let vec = self.shared_vec();
3536
3537            unsafe {
3538                let vec_cap = (*vec).cap - SHARED_VEC_SIZE;
3539
3540                // First, try to reclaim the buffer. This is possible if the current
3541                // handle is the only outstanding handle pointing to the buffer.
3542                if (*vec).is_unique() && vec_cap >= new_cap {
3543                    // The capacity is sufficient, reclaim the buffer
3544                    let ptr = (vec as *mut u8).add(SHARED_VEC_SIZE);
3545                    ptr::copy(self.ptr, ptr, len);
3546
3547                    self.ptr = ptr;
3548                    self.cap = vec_cap;
3549                } else {
3550                    // Create a new vector storage
3551                    *self = Inner::from_slice(new_cap, self.as_ref(), (*vec).pool);
3552                }
3553            }
3554        } else {
3555            debug_assert!(kind == KIND_ARC);
3556
3557            let arc = self.arc.as_ptr();
3558            unsafe {
3559                // First, try to reclaim the buffer. This is possible if the current
3560                // handle is the only outstanding handle pointing to the buffer.
3561                if (*arc).is_unique() {
3562                    // This is the only handle to the buffer. It can be reclaimed.
3563                    // However, before doing the work of copying data, check to make
3564                    // sure that the vector has enough capacity.
3565                    let v = &mut (*arc).vec;
3566
3567                    if v.capacity() >= new_cap {
3568                        // The capacity is sufficient, reclaim the buffer
3569                        let ptr = v.as_mut_ptr();
3570
3571                        ptr::copy(self.ptr, ptr, len);
3572
3573                        self.ptr = ptr;
3574                        self.cap = v.capacity();
3575                        return;
3576                    }
3577                }
3578
3579                // Create a new vector storage
3580                *self = Inner::from_slice(new_cap, self.as_ref(), (*arc).pool);
3581            }
3582        }
3583    }
3584
3585    /// Returns true if the buffer is stored inline
3586    #[inline]
3587    fn is_inline(&self) -> bool {
3588        self.kind() == KIND_INLINE
3589    }
3590
3591    #[inline]
3592    fn is_inline_or_static(&self) -> bool {
        // The value returned by `kind` is not itself synchronized, but it can
        // be used to decide which operations may safely be performed without
        // synchronization.
        //
        // KIND_INLINE and KIND_STATIC never change after construction, so
        // branching on that information is safe.
3599        let kind = self.kind();
3600        kind == KIND_INLINE || kind == KIND_STATIC
3601    }
3602
3603    /// Used for `debug_assert` statements
3604    #[inline]
3605    fn is_static(&self) -> bool {
3606        matches!(self.kind(), KIND_STATIC)
3607    }
3608
3609    #[inline]
3610    fn shared_vec(&self) -> *mut SharedVec {
3611        ((self.arc.as_ptr() as usize) & KIND_UNMASK) as *mut SharedVec
3612    }
3613
3614    #[inline]
3615    fn kind(&self) -> usize {
        // This function returns the kind tag stored in the low bits of the
        // `arc` field (KIND_INLINE, KIND_STATIC, KIND_VEC or KIND_ARC).
        //
        // The tag bits are set when `Inner` is created and never change after
        // that, so they can be read without any synchronization, even while
        // other handles are concurrently updating the reference count behind
        // the same pointer. (The `pointer_tagging_sketch` test illustrates the
        // tagging idea on its own.)
        //
        // This function is performance critical, as it is called for almost
        // every operation. Keeping it a plain, non-atomic read leaves the
        // compiler free to optimize it; simple benchmarks showed up to a 10%
        // slowdown when a `Relaxed` atomic load was used on x86 (discussion:
        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3637
3638        #[cfg(target_endian = "little")]
3639        #[inline]
3640        fn imp(arc: *mut Shared) -> usize {
3641            (arc as usize) & KIND_MASK
3642        }
3643
3644        #[cfg(target_endian = "big")]
3645        #[inline]
3646        fn imp(arc: *mut Shared) -> usize {
3647            unsafe {
3648                let p: *const usize = arc as *const usize;
3649                *p & KIND_MASK
3650            }
3651        }
3652
3653        imp(self.arc.as_ptr())
3654    }
3655}
3656
3657impl Drop for Inner {
3658    fn drop(&mut self) {
3659        let kind = self.kind();
3660
3661        if kind == KIND_VEC {
3662            release_shared_vec(self.shared_vec());
3663        } else if kind == KIND_ARC {
3664            release_shared(self.arc.as_ptr());
3665        }
3666    }
3667}
3668
3669fn release_shared(ptr: *mut Shared) {
3670    // `Shared` storage... follow the drop steps from Arc.
3671    unsafe {
3672        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3673            return;
3674        }
3675
3676        // This fence is needed to prevent reordering of use of the data and
3677        // deletion of the data.  Because it is marked `Release`, the decreasing
3678        // of the reference count synchronizes with this `Acquire` fence. This
3679        // means that use of the data happens before decreasing the reference
3680        // count, which happens before this fence, which happens before the
3681        // deletion of the data.
3682        //
3683        // As explained in the [Boost documentation][1],
3684        //
3685        // > It is important to enforce any possible access to the object in one
3686        // > thread (through an existing reference) to *happen before* deleting
3687        // > the object in a different thread. This is achieved by a "release"
3688        // > operation after dropping a reference (any access to the object
3689        // > through this reference must obviously happened before), and an
3690        // > "acquire" operation before deleting the object.
3691        //
3692        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3693        atomic::fence(Acquire);
3694
3695        // Drop the data
3696        let arc = Box::from_raw(ptr);
3697        arc.pool.release(arc.vec.capacity());
3698    }
3699}
3700
3701fn release_shared_vec(ptr: *mut SharedVec) {
    // `SharedVec` storage... follow the drop steps from Arc.
3703    unsafe {
3704        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3705            return;
3706        }
3707
3708        // This fence is needed to prevent reordering of use of the data and
3709        // deletion of the data.  Because it is marked `Release`, the decreasing
3710        // of the reference count synchronizes with this `Acquire` fence. This
3711        // means that use of the data happens before decreasing the reference
3712        // count, which happens before this fence, which happens before the
3713        // deletion of the data.
3714        //
3715        // As explained in the [Boost documentation][1],
3716        //
3717        // > It is important to enforce any possible access to the object in one
3718        // > thread (through an existing reference) to *happen before* deleting
3719        // > the object in a different thread. This is achieved by a "release"
3720        // > operation after dropping a reference (any access to the object
3721        // > through this reference must obviously happened before), and an
3722        // > "acquire" operation before deleting the object.
3723        //
3724        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3725        atomic::fence(Acquire);
3726
3727        // Drop the data
3728        let cap = (*ptr).cap;
3729        (*ptr).pool.release(cap);
3730        ptr::drop_in_place(ptr);
3731        Vec::<u8>::from_raw_parts(ptr as *mut u8, 0, cap);
3732    }
3733}
3734
3735impl Shared {
3736    fn is_unique(&self) -> bool {
3737        // The goal is to check if the current handle is the only handle
3738        // that currently has access to the buffer. This is done by
3739        // checking if the `ref_count` is currently 1.
3740        //
3741        // The `Acquire` ordering synchronizes with the `Release` as
3742        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
3743        // operation guarantees that any mutations done in other threads
3744        // are ordered before the `ref_count` is decremented. As such,
3745        // this `Acquire` will guarantee that those mutations are
3746        // visible to the current thread.
3747        self.ref_count.load(Acquire) == 1
3748    }
3749}
3750
3751impl SharedVec {
3752    fn is_unique(&self) -> bool {
        // Same as `Shared::is_unique()`, but for `KIND_VEC` storage
3754        self.ref_count.load(Acquire) == 1
3755    }
3756}
3757
3758unsafe impl Send for Inner {}
3759unsafe impl Sync for Inner {}
3760
3761/*
3762 *
3763 * ===== PartialEq / PartialOrd =====
3764 *
3765 */
3766
3767impl PartialEq<[u8]> for BytesMut {
3768    fn eq(&self, other: &[u8]) -> bool {
3769        &**self == other
3770    }
3771}
3772
3773impl<const N: usize> PartialEq<[u8; N]> for BytesMut {
3774    fn eq(&self, other: &[u8; N]) -> bool {
3775        &**self == other
3776    }
3777}
3778
3779impl PartialEq<BytesMut> for [u8] {
3780    fn eq(&self, other: &BytesMut) -> bool {
3781        *other == *self
3782    }
3783}
3784
3785impl<const N: usize> PartialEq<BytesMut> for [u8; N] {
3786    fn eq(&self, other: &BytesMut) -> bool {
3787        *other == *self
3788    }
3789}
3790
3791impl<const N: usize> PartialEq<BytesMut> for &[u8; N] {
3792    fn eq(&self, other: &BytesMut) -> bool {
3793        *other == *self
3794    }
3795}
3796
3797impl PartialEq<str> for BytesMut {
3798    fn eq(&self, other: &str) -> bool {
3799        &**self == other.as_bytes()
3800    }
3801}
3802
3803impl PartialEq<BytesMut> for str {
3804    fn eq(&self, other: &BytesMut) -> bool {
3805        *other == *self
3806    }
3807}
3808
3809impl PartialEq<Vec<u8>> for BytesMut {
3810    fn eq(&self, other: &Vec<u8>) -> bool {
3811        *self == other[..]
3812    }
3813}
3814
3815impl PartialEq<BytesMut> for Vec<u8> {
3816    fn eq(&self, other: &BytesMut) -> bool {
3817        *other == *self
3818    }
3819}
3820
3821impl PartialEq<String> for BytesMut {
3822    fn eq(&self, other: &String) -> bool {
3823        *self == other[..]
3824    }
3825}
3826
3827impl PartialEq<BytesMut> for String {
3828    fn eq(&self, other: &BytesMut) -> bool {
3829        *other == *self
3830    }
3831}
3832
3833impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
3834where
3835    BytesMut: PartialEq<T>,
3836{
3837    fn eq(&self, other: &&'a T) -> bool {
3838        *self == **other
3839    }
3840}
3841
3842impl PartialEq<BytesMut> for &[u8] {
3843    fn eq(&self, other: &BytesMut) -> bool {
3844        *other == *self
3845    }
3846}
3847
3848impl PartialEq<BytesMut> for &str {
3849    fn eq(&self, other: &BytesMut) -> bool {
3850        *other == *self
3851    }
3852}
3853
3854impl PartialEq<[u8]> for Bytes {
3855    fn eq(&self, other: &[u8]) -> bool {
3856        self.inner.as_ref() == other
3857    }
3858}
3859
3860impl<const N: usize> PartialEq<[u8; N]> for Bytes {
3861    fn eq(&self, other: &[u8; N]) -> bool {
3862        self.inner.as_ref() == other.as_ref()
3863    }
3864}
3865
3866impl PartialOrd<[u8]> for Bytes {
3867    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
3868        self.inner.as_ref().partial_cmp(other)
3869    }
3870}
3871
3872impl<const N: usize> PartialOrd<[u8; N]> for Bytes {
3873    fn partial_cmp(&self, other: &[u8; N]) -> Option<cmp::Ordering> {
3874        self.inner.as_ref().partial_cmp(other.as_ref())
3875    }
3876}
3877
3878impl PartialEq<Bytes> for [u8] {
3879    fn eq(&self, other: &Bytes) -> bool {
3880        *other == *self
3881    }
3882}
3883
3884impl<const N: usize> PartialEq<Bytes> for [u8; N] {
3885    fn eq(&self, other: &Bytes) -> bool {
3886        *other == *self
3887    }
3888}
3889
3890impl<const N: usize> PartialEq<Bytes> for &[u8; N] {
3891    fn eq(&self, other: &Bytes) -> bool {
3892        *other == *self
3893    }
3894}
3895
3896impl PartialOrd<Bytes> for [u8] {
3897    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        // The delegated comparison is `other` versus `self`, so the result
        // has to be reversed.
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3899    }
3900}
3901
3902impl<const N: usize> PartialOrd<Bytes> for [u8; N] {
3903    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3905    }
3906}
3907
3908impl PartialEq<str> for Bytes {
3909    fn eq(&self, other: &str) -> bool {
3910        self.inner.as_ref() == other.as_bytes()
3911    }
3912}
3913
3914impl PartialOrd<str> for Bytes {
3915    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
3916        self.inner.as_ref().partial_cmp(other.as_bytes())
3917    }
3918}
3919
3920impl PartialEq<Bytes> for str {
3921    fn eq(&self, other: &Bytes) -> bool {
3922        *other == *self
3923    }
3924}
3925
3926impl PartialOrd<Bytes> for str {
3927    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3929    }
3930}
3931
3932impl PartialEq<Vec<u8>> for Bytes {
3933    fn eq(&self, other: &Vec<u8>) -> bool {
3934        *self == other[..]
3935    }
3936}
3937
3938impl PartialOrd<Vec<u8>> for Bytes {
3939    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
3940        self.inner.as_ref().partial_cmp(&other[..])
3941    }
3942}
3943
3944impl PartialEq<Bytes> for Vec<u8> {
3945    fn eq(&self, other: &Bytes) -> bool {
3946        *other == *self
3947    }
3948}
3949
3950impl PartialOrd<Bytes> for Vec<u8> {
3951    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3953    }
3954}
3955
3956impl PartialEq<String> for Bytes {
3957    fn eq(&self, other: &String) -> bool {
3958        *self == other[..]
3959    }
3960}
3961
3962impl PartialOrd<String> for Bytes {
3963    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
3964        self.inner.as_ref().partial_cmp(other.as_bytes())
3965    }
3966}
3967
3968impl PartialEq<Bytes> for String {
3969    fn eq(&self, other: &Bytes) -> bool {
3970        *other == *self
3971    }
3972}
3973
3974impl PartialOrd<Bytes> for String {
3975    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3977    }
3978}
3979
3980impl PartialEq<Bytes> for &[u8] {
3981    fn eq(&self, other: &Bytes) -> bool {
3982        *other == *self
3983    }
3984}
3985
3986impl PartialOrd<Bytes> for &[u8] {
3987    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
3989    }
3990}
3991
3992impl PartialEq<Bytes> for &str {
3993    fn eq(&self, other: &Bytes) -> bool {
3994        *other == *self
3995    }
3996}
3997
3998impl PartialOrd<Bytes> for &str {
3999    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        other.partial_cmp(self).map(cmp::Ordering::reverse)
4001    }
4002}
4003
4004impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
4005where
4006    Bytes: PartialEq<T>,
4007{
4008    fn eq(&self, other: &&'a T) -> bool {
4009        *self == **other
4010    }
4011}
4012
4013impl From<BytesVec> for Bytes {
4014    fn from(b: BytesVec) -> Self {
4015        b.freeze()
4016    }
4017}
4018
4019impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
4020where
4021    Bytes: PartialOrd<T>,
4022{
4023    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
4024        self.partial_cmp(&**other)
4025    }
4026}
4027
4028impl PartialEq<BytesMut> for Bytes {
4029    fn eq(&self, other: &BytesMut) -> bool {
4030        other[..] == self[..]
4031    }
4032}
4033
4034impl PartialEq<BytesVec> for Bytes {
4035    fn eq(&self, other: &BytesVec) -> bool {
4036        other[..] == self[..]
4037    }
4038}
4039
4040impl PartialEq<Bytes> for BytesVec {
4041    fn eq(&self, other: &Bytes) -> bool {
4042        other[..] == self[..]
4043    }
4044}
4045
4046impl PartialEq<Bytes> for BytesMut {
4047    fn eq(&self, other: &Bytes) -> bool {
4048        other[..] == self[..]
4049    }
4050}
4051
4052impl PartialEq<BytesMut> for BytesVec {
4053    fn eq(&self, other: &BytesMut) -> bool {
4054        other[..] == self[..]
4055    }
4056}
4057
4058impl PartialEq<BytesVec> for BytesMut {
4059    fn eq(&self, other: &BytesVec) -> bool {
4060        other[..] == self[..]
4061    }
4062}
4063
4064impl PartialEq<[u8]> for BytesVec {
4065    fn eq(&self, other: &[u8]) -> bool {
4066        &**self == other
4067    }
4068}
4069
4070impl<const N: usize> PartialEq<[u8; N]> for BytesVec {
4071    fn eq(&self, other: &[u8; N]) -> bool {
4072        &**self == other
4073    }
4074}
4075
4076impl PartialEq<BytesVec> for [u8] {
4077    fn eq(&self, other: &BytesVec) -> bool {
4078        *other == *self
4079    }
4080}
4081
4082impl<const N: usize> PartialEq<BytesVec> for [u8; N] {
4083    fn eq(&self, other: &BytesVec) -> bool {
4084        *other == *self
4085    }
4086}
4087
4088impl<const N: usize> PartialEq<BytesVec> for &[u8; N] {
4089    fn eq(&self, other: &BytesVec) -> bool {
4090        *other == *self
4091    }
4092}
4093
4094impl PartialEq<str> for BytesVec {
4095    fn eq(&self, other: &str) -> bool {
4096        &**self == other.as_bytes()
4097    }
4098}
4099
4100impl PartialEq<BytesVec> for str {
4101    fn eq(&self, other: &BytesVec) -> bool {
4102        *other == *self
4103    }
4104}
4105
4106impl PartialEq<Vec<u8>> for BytesVec {
4107    fn eq(&self, other: &Vec<u8>) -> bool {
4108        *self == other[..]
4109    }
4110}
4111
4112impl PartialEq<BytesVec> for Vec<u8> {
4113    fn eq(&self, other: &BytesVec) -> bool {
4114        *other == *self
4115    }
4116}
4117
4118impl PartialEq<String> for BytesVec {
4119    fn eq(&self, other: &String) -> bool {
4120        *self == other[..]
4121    }
4122}
4123
4124impl PartialEq<BytesVec> for String {
4125    fn eq(&self, other: &BytesVec) -> bool {
4126        *other == *self
4127    }
4128}
4129
4130impl<'a, T: ?Sized> PartialEq<&'a T> for BytesVec
4131where
4132    BytesVec: PartialEq<T>,
4133{
4134    fn eq(&self, other: &&'a T) -> bool {
4135        *self == **other
4136    }
4137}
4138
4139impl PartialEq<BytesVec> for &[u8] {
4140    fn eq(&self, other: &BytesVec) -> bool {
4141        *other == *self
4142    }
4143}
4144
4145impl PartialEq<BytesVec> for &str {
4146    fn eq(&self, other: &BytesVec) -> bool {
4147        *other == *self
4148    }
4149}
4150
// Stand-in for `std::process::abort` (this code predates its stabilization in
// Rust 1.17): triggering a second panic while already panicking always aborts
// in Rust.
4154struct Abort;
4155
4156impl Drop for Abort {
4157    fn drop(&mut self) {
4158        panic!();
4159    }
4160}
4161
4162#[inline(never)]
4163#[cold]
4164fn abort() {
4165    let _a = Abort;
4166    panic!();
4167}
4168
4169#[cfg(test)]
4170mod tests {
4171    use std::collections::HashMap;
4172
4173    use super::*;
4174
4175    const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4176        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
4177        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb";
4178
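    // A minimal, self-contained sketch of the pointer-tagging idea described
    // in `Inner::kind`: heap allocations are at least word aligned, so the low
    // bits of a pointer are zero and can carry a small tag. The `0b1` tag used
    // here is illustrative only and is not one of the crate's `KIND_*` values.
    #[test]
    fn pointer_tagging_sketch() {
        let raw = Box::into_raw(Box::new(0u64));
        let addr = raw as usize;

        // An 8-byte aligned allocation leaves the low bits free for tagging.
        assert_eq!(addr & 0b111, 0);

        let tagged = addr | 0b1;
        assert_eq!(tagged & 0b1, 0b1); // the tag can be read back
        assert_eq!(tagged & !0b1, addr); // masking it off recovers the pointer

        // Re-box the pointer so the allocation is freed.
        unsafe { drop(Box::from_raw(raw)) };
    }
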
4179    #[test]
4180    fn trimdown() {
4181        let mut b = Bytes::from(LONG.to_vec());
4182        assert_eq!(b.inner.capacity(), 263);
4183        unsafe { b.inner.set_len(68) };
4184        assert_eq!(b.len(), 68);
4185        assert_eq!(b.inner.capacity(), 263);
4186        b.trimdown();
4187        assert_eq!(b.inner.capacity(), 96);
4188
4189        unsafe { b.inner.set_len(16) };
4190        b.trimdown();
4191        assert!(b.is_inline());
4192    }
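
    // A small sketch of the sharing behavior implemented by `shallow_clone`
    // and `release_shared`: clones of a heap-backed `Bytes` point at the same
    // storage, and the data stays alive until the last handle is dropped. The
    // pointer comparison assumes the payload is not copied for non-inline
    // storage.
    #[test]
    fn clones_share_storage() {
        let a = Bytes::from(LONG.to_vec());
        let b = a.clone();

        // Both handles observe the same bytes at the same address.
        assert_eq!(a, b);
        assert_eq!(a.as_ref().as_ptr(), b.as_ref().as_ptr());

        // Dropping one handle only decrements the reference count; the data
        // remains valid for the remaining handle.
        drop(a);
        assert_eq!(b, LONG);
    }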
4193
4194    #[test]
4195    #[allow(
4196        clippy::len_zero,
4197        clippy::nonminimal_bool,
4198        clippy::unnecessary_fallible_conversions
4199    )]
4200    fn bytes() {
4201        let mut b = Bytes::from(LONG.to_vec());
4202        b.clear();
4203        assert!(b.is_inline());
4204        assert!(b.is_empty());
4205        assert!(b.len() == 0);
4206
4207        let b = Bytes::from(&Bytes::from(LONG));
4208        assert_eq!(b, LONG);
4209
4210        let b = Bytes::from(BytesMut::from(LONG));
4211        assert_eq!(b, LONG);
4212
4213        let mut b: Bytes = BytesMut::try_from(b).unwrap().freeze();
4214        assert_eq!(b, LONG);
4215        assert!(!(b > b));
4216        assert_eq!(<Bytes as Buf>::remaining(&b), LONG.len());
4217        assert_eq!(<Bytes as Buf>::chunk(&b), LONG);
4218        <Bytes as Buf>::advance(&mut b, 10);
4219        assert_eq!(Buf::chunk(&b), &LONG[10..]);
4220
4221        let mut h: HashMap<Bytes, usize> = HashMap::default();
4222        h.insert(b.clone(), 1);
4223        assert_eq!(h.get(&b), Some(&1));
4224
4225        let mut b = BytesMut::try_from(LONG).unwrap();
4226        assert_eq!(b, LONG);
4227        assert_eq!(<BytesMut as Buf>::remaining(&b), LONG.len());
4228        assert_eq!(<BytesMut as BufMut>::remaining_mut(&b), 25);
4229        assert_eq!(<BytesMut as Buf>::chunk(&b), LONG);
4230        <BytesMut as Buf>::advance(&mut b, 10);
4231        assert_eq!(<BytesMut as Buf>::chunk(&b), &LONG[10..]);
4232
4233        let mut b = BytesMut::with_capacity(12);
4234        <BytesMut as BufMut>::put_i8(&mut b, 1);
4235        assert_eq!(b, b"\x01".as_ref());
4236        <BytesMut as BufMut>::put_u8(&mut b, 2);
4237        assert_eq!(b, b"\x01\x02".as_ref());
4238        <BytesMut as BufMut>::put_slice(&mut b, b"12345");
4239        assert_eq!(b, b"\x01\x0212345".as_ref());
4240        <BytesMut as BufMut>::chunk_mut(&mut b).write_byte(0, b'1');
4241        unsafe { <BytesMut as BufMut>::advance_mut(&mut b, 1) };
4242        assert_eq!(b, b"\x01\x02123451".as_ref());
4243    }
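
    // A usage sketch for the `reserve`/`reserve_inner` logic above, assuming
    // the public `BytesMut::reserve` and `BytesMut::capacity` methods forward
    // to it: after reserving, the handle can hold at least `len + additional`
    // bytes, and the existing contents are preserved.
    #[test]
    fn reserve_grows_capacity() {
        let mut b = BytesMut::from(LONG);
        let len = b.len();

        b.reserve(1024);
        assert!(b.capacity() >= len + 1024);

        // The contents survive any reallocation that `reserve` performed.
        assert_eq!(b, LONG);
    }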
4244
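    // A sketch exercising the `Send`/`Sync` impls together with the atomic
    // reference counting: clones of one `Bytes` value can be read and dropped
    // from several threads, and the original handle stays valid afterwards.
    #[test]
    fn concurrent_clone_and_drop() {
        use std::thread;

        let b = Bytes::from(LONG.to_vec());

        let handles: Vec<_> = (0..4)
            .map(|_| {
                let c = b.clone();
                thread::spawn(move || assert_eq!(c, LONG))
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        assert_eq!(b, LONG);
    }
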
4245    #[test]
4246    #[allow(clippy::unnecessary_fallible_conversions)]
4247    fn bytes_vec() {
4248        let bv = BytesVec::copy_from_slice(LONG);
4249        // SharedVec size is 32
4250        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>() * 9);
4251        assert_eq!(bv.len(), 263);
4252        assert_eq!(bv.as_ref().len(), 263);
4253        assert_eq!(bv.as_ref(), LONG);
4254
4255        let mut bv = BytesVec::copy_from_slice(&b"hello"[..]);
4256        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>());
4257        assert_eq!(bv.len(), 5);
4258        assert_eq!(bv.as_ref().len(), 5);
4259        assert_eq!(bv.as_ref()[0], b"h"[0]);
4260        bv.put_u8(b" "[0]);
4261        assert_eq!(bv.as_ref(), &b"hello "[..]);
4262        bv.put("world");
4263        assert_eq!(bv, "hello world");
4264
4265        let b = Bytes::from(bv);
4266        assert_eq!(b, "hello world");
4267
4268        let mut b = BytesMut::try_from(b).unwrap();
4269        b.put(".");
4270        assert_eq!(b, "hello world.");
4271    }
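
    // A usage sketch for the `PartialEq`/`PartialOrd` impls above: the byte
    // containers compare directly against slices, arrays, `str`, `String` and
    // `Vec<u8>`.
    #[test]
    fn eq_and_ord_across_types() {
        let b = Bytes::from(&b"hello"[..]);
        let m = BytesMut::from(&b"hello"[..]);
        let v = BytesVec::copy_from_slice(&b"hello"[..]);

        assert_eq!(b, "hello");
        assert_eq!("hello", b);
        assert_eq!(b, b"hello");
        assert_eq!(b, Vec::from(&b"hello"[..]));
        assert_eq!(m, "hello");
        assert_eq!(v, "hello");
        assert_eq!(b, m);
        assert_eq!(m, v);

        assert!(b < "world");
        assert!(b > "abc".to_string());
    }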
4272}