
use std::borrow::{Borrow, BorrowMut};
use std::iter::{FromIterator, Iterator};
use std::ops::{Deref, DerefMut, RangeBounds};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::sync::atomic::{self, AtomicUsize};
use std::{cmp, fmt, hash, mem, ptr, ptr::NonNull, slice, usize};

use crate::bytes::pool::{PoolId, PoolRef};
use crate::bytes::{buf::IntoIter, buf::UninitSlice, debug, Buf, BufMut};

/// A reference counted contiguous slice of memory.
///
/// `Bytes` is an efficient container for storing and operating on contiguous
/// slices of memory. It is intended for use primarily in networking code, but
/// could have applications elsewhere as well.
///
/// `Bytes` values facilitate zero-copy network programming by allowing multiple
/// `Bytes` objects to point to the same underlying memory. This is managed by
/// using a reference count to track when the memory is no longer needed and can
/// be freed.
///
/// ```
/// use mco_redis::bytes::Bytes;
///
/// let mut mem = Bytes::from(&b"Hello world"[..]);
/// let a = mem.slice(0..5);
///
/// assert_eq!(&a[..], b"Hello");
///
/// let b = mem.split_to(6);
///
/// assert_eq!(&mem[..], b"world");
/// assert_eq!(&b[..], b"Hello ");
/// ```
///
/// # Memory layout
///
/// The `Bytes` struct itself is fairly small, limited to 4 pointer-sized
/// fields used to track information about which segment of the underlying
/// memory the `Bytes` handle has access to.
///
/// The memory layout looks like this:
///
/// ```text
/// +-------+
/// | Bytes |
/// +-------+
///  /      \_____
/// |              \
/// v               v
/// +-----+------------------------------------+
/// | Arc |         |      Data     |          |
/// +-----+------------------------------------+
/// ```
///
/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
/// slice and a pointer to the start of the region visible by the handle.
/// `Bytes` also tracks the length of its view into the memory.
///
/// # Sharing
///
/// The memory itself is reference counted, and multiple `Bytes` objects may
/// point to the same region. Each `Bytes` handle may point to a different
/// section within the memory region, and handles may or may not have
/// overlapping views into the memory.
///
/// ```text
///
///    Arc ptrs                   +---------+
///    ________________________ / | Bytes 2 |
///   /                           +---------+
///  /          +-----------+     |         |
/// |_________/ |  Bytes 1  |     |         |
/// |           +-----------+     |         |
/// |           |           | ___/ data     | tail
/// |      data |      tail |/              |
/// v           v           v               v
/// +-----+---------------------------------+-----+
/// | Arc |     |           |               |     |
/// +-----+---------------------------------+-----+
/// ```
///
/// # Mutating
///
/// While `Bytes` handles may potentially represent overlapping views of the
/// underlying memory slice and may not be mutated, `BytesMut` handles are
/// guaranteed to be the only handle able to view that slice of memory. As such,
/// `BytesMut` handles are able to mutate the underlying memory. Note that
/// holding a unique view to a region of memory does not mean that there are no
/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
/// memory.
///
/// # Inline bytes
///
/// As an optimization, when the slice referenced by a `Bytes` handle is small
/// enough [^1], the data is stored inline in the handle itself. In this case,
/// a clone is no longer "shallow" and the data will be copied. Converting from
/// a `Vec` may also use inlining for small buffers. `BytesMut` does not
/// support data inlining and always allocates, but during conversion to
/// `Bytes` data from `BytesMut` may be inlined.
///
/// [^1]: Small enough: 30 bytes on 64 bit systems, 14 on 32 bit systems.
///
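/// For example, a short sketch of how inlining affects cloning:
///
/// ```
/// use mco_redis::bytes::Bytes;
///
/// // 5 bytes fit in the inline buffer: no heap allocation is needed, and a
/// // clone copies the data instead of bumping a reference count.
/// let small = Bytes::copy_from_slice(b"hello");
/// assert!(small.is_inline());
///
/// // 64 bytes exceed the inline capacity, so the data lives on the heap and
/// // clones are shallow.
/// let large = Bytes::copy_from_slice(&[0u8; 64][..]);
/// assert!(!large.is_inline());
/// ```
///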
pub struct Bytes {
    inner: Inner,
}

/// A unique reference to a contiguous slice of memory.
///
/// `BytesMut` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations.
///
/// For more detail, see [Bytes](struct.Bytes.html).
///
/// # Growth
///
/// One key difference from `Vec<u8>` is that most operations **do not
/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
/// world");` could panic if `my_bytes` does not have enough capacity. Before
/// writing to the buffer, ensure that there is enough remaining capacity by
/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
/// is preferable.
///
/// The only exception is `extend` which implicitly reserves required capacity.
///
/// # Examples
///
/// ```
/// use mco_redis::bytes::{BytesMut, BufMut};
///
/// let mut buf = BytesMut::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put("llo");
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesMut {
    inner: Inner,
}

/// A unique reference to a contiguous slice of memory.
///
/// `BytesVec` represents a unique view into a potentially shared memory region.
/// Given the uniqueness guarantee, owners of `BytesVec` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations. It also always allocates.
///
/// For more detail, see [Bytes](struct.Bytes.html).
///
/// # Growth
///
/// One key difference from `Vec<u8>` is that most operations **do not
/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
/// world");` could panic if `my_bytes` does not have enough capacity. Before
/// writing to the buffer, ensure that there is enough remaining capacity by
/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
/// is preferable.
///
/// The only exception is `extend` which implicitly reserves required capacity.
///
/// # Examples
///
/// ```
/// use mco_redis::bytes::{BytesVec, BufMut};
///
/// let mut buf = BytesVec::with_capacity(64);
///
/// buf.put_u8(b'h');
/// buf.put_u8(b'e');
/// buf.put("llo");
///
/// assert_eq!(&buf[..], b"hello");
///
/// // Freeze the buffer so that it can be shared
/// let a = buf.freeze();
///
/// // This does not allocate, instead `b` points to the same memory.
/// let b = a.clone();
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b"hello");
/// ```
pub struct BytesVec {
    inner: InnerVec,
}

// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
// that mutate the underlying buffer are only performed when the data range
// being mutated is only available via a single `BytesMut` handle.
//
// # Data storage modes
//
// The goal of `bytes` is to be as efficient as possible across a wide range of
// potential usage patterns. As such, `bytes` needs to be able to handle buffers
// that are never shared, shared on a single thread, and shared across many
// threads. `bytes` also needs to handle both tiny buffers as well as very large
// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
// been known to be in the hundreds of megabytes, and HTTP header values can
// be a few characters in size.
//
// To achieve high performance in these various situations, `Bytes` and
// `BytesMut` use different strategies for storing the buffer depending on the
// usage pattern.
//
// ## Delayed `Arc` allocation
//
// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
// not used and the buffer is backed by a `Vec<u8>` directly. Using an
// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
// shared, that allocation is avoided.
//
// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
// into an `Arc` and both the original handle and the new handle use the same
// buffer via the `Arc`.
//
// * `Arc` is being used to signify an atomically reference counted cell. We
// don't use the `Arc` implementation provided by `std` and instead use our own.
// This ends up simplifying a number of the `unsafe` code snippets.
//
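// As an illustrative sketch of the promotion (state names refer to the
// `KIND_*` flags defined below):
//
//     let mut b = BytesMut::with_capacity(1024); // KIND_VEC: plain Vec, not shared
//     let c = b.split_off(512);                  // promoted: `b` and `c` now share
//                                                // one ref-counted buffer
//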
// ## Inlining small buffers
//
// The `Bytes` / `BytesMut` structs require 4 pointer-sized fields. On 64 bit
// systems, this ends up being 32 bytes, which is actually a lot of storage for
// cases where `Bytes` is being used to represent small byte strings, such as
// HTTP header names and values.
//
// To avoid any allocation at all in these cases, `Bytes` will use the struct
// itself for storing the buffer, reserving 2 bytes for metadata. This means
// that, on 64 bit systems, buffers of up to 30 bytes require no allocation at
// all.
//
// The first metadata byte stores a 2 bit flag used to indicate that the
// buffer is stored inline, as well as 6 bits for tracking the buffer length
// (the return value of `Bytes::len`).
//
// ## Static buffers
//
// `Bytes` can also represent a static buffer, which is created with
// `Bytes::from_static`. No copying or allocations are required for tracking
// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
// tracking that the `Bytes` instance represents a static buffer is stored in
// the `Bytes` struct.
//
// # Struct layout
//
// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
// data fields as well as all of the function implementations.
//
// The `Inner` struct is carefully laid out in order to support the
// functionality described above as well as being as small as possible. Size is
// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
// map structure.
//
// The `Inner` struct contains the following fields:
//
// * `ptr: *mut u8`
// * `len: usize`
// * `cap: usize`
// * `arc: *mut Shared`
//
// ## `ptr: *mut u8`
//
// A pointer to the start of the handle's buffer view. When backed by a
// `Vec<u8>`, this is always the `Vec`'s pointer. When backed by an
// `Arc<Vec<u8>>`, `ptr` may have been shifted to point somewhere inside the
// buffer.
//
// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
//
// ## `len: usize`
//
// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
// always the `Vec`'s length. The slice represented by `ptr` and `len` should
// (ideally) always be initialized memory.
//
// When in "inlined" mode, `len` is used as part of the inlined buffer.
//
// ## `cap: usize`
//
// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
// may or may not be initialized memory.
//
// When in "inlined" mode, `cap` is used as part of the inlined buffer.
//
// ## `arc: *mut Shared`
//
// When `Inner` is in allocated mode (backed by `Vec<u8>` or `Arc<Vec<u8>>`),
// this is the pointer to the `Arc` structure tracking the ref count for the
// underlying buffer. While the storage mode is still `KIND_VEC`, the `Arc`
// has not been allocated yet and `self` is the only outstanding handle for
// the underlying buffer.
//
// The lower two bits of `arc` are used to track the storage mode of `Inner`.
// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
// indicates vector storage, not yet promoted to Arc. Since pointers to
// allocated structures are aligned, the lower two bits of a pointer will always
// be 0. This allows disambiguating between a pointer and the two flags.
//
// When in "inlined" mode, the least significant byte of `arc` is also used to
// store the length of the buffer view (vs. the capacity, which is a constant).
//
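// As a sketch, decoding an `arc` word `a: usize` with the masks defined
// below (variable names here are illustrative only):
//
//     let kind = a & KIND_MASK;            // 0b00 Arc, 0b01 inline,
//                                          // 0b10 static, 0b11 vec
//     let ptr = (a & KIND_UNMASK) as *mut Shared;  // flag bits cleared
//     let inline_len = (a & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET; // inline only
//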
// The rest of `arc`'s bytes are used as part of the inline buffer, which means
// that those bytes need to be located next to the `ptr`, `len`, and `cap`
// fields, which make up the rest of the inline buffer. This requires special
// casing the layout of `Inner` depending on if the target platform is big or
// little endian.
//
// On little endian platforms, the `arc` field must be the first field in the
// struct. On big endian platforms, the `arc` field must be the last field in
// the struct. Since a deterministic struct layout is required, `Inner` is
// annotated with `#[repr(C)]`.
//
// # Thread safety
//
// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
// by bumping the buffer ref count and returning a new struct pointing to the
// same buffer. However, the `Arc` structure is lazily allocated. This means
// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
// function can be called concurrently from multiple threads. This is why the
// `arc` field is read and updated with atomic operations rather than as a
// plain pointer.
//
// Care is taken to ensure that the need for synchronization is minimized. Most
// operations do not require any synchronization.
//
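// For example, concurrently cloning one shared `Bytes` is sound even though
// the first clone lazily allocates the `Arc` (a sketch, not from the tests):
//
//     let b = std::sync::Arc::new(Bytes::from(vec![0u8; 64]));
//     let b2 = b.clone();
//     std::thread::spawn(move || { let _ = (*b2).clone(); });
//     let _ = (*b).clone();
//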
#[cfg(target_endian = "little")]
#[repr(C)]
struct Inner {
    // WARNING: Do not access the fields directly unless you know what you are
    // doing. Instead, use the fns. See implementation comment above.
    arc: NonNull<Shared>,
    ptr: *mut u8,
    len: usize,
    cap: usize,
}

#[cfg(target_endian = "big")]
#[repr(C)]
struct Inner {
    // WARNING: Do not access the fields directly unless you know what you are
    // doing. Instead, use the fns. See implementation comment above.
    ptr: *mut u8,
    len: usize,
    cap: usize,
    arc: NonNull<Shared>,
}

// Thread-safe reference-counted container for the shared storage. This is
// mostly the same as `std::sync::Arc` but without the weak counter. The ref
// counting fns are based on the ones found in `std`.
//
// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
// up making the overall code simpler and easier to reason about. This is due to
// some of the logic around setting `Inner::arc` and other ways the `arc` field
// is used. Using `Arc` ended up requiring a number of funky transmutes and
// other shenanigans to make it work.
struct Shared {
    vec: Vec<u8>,
    ref_count: AtomicUsize,
    pool: PoolRef,
}

struct SharedVec {
    cap: usize,
    len: u32,
    offset: u32,
    ref_count: AtomicUsize,
    pool: PoolRef,
}

// Buffer storage strategy flags.
const KIND_ARC: usize = 0b00;
const KIND_INLINE: usize = 0b01;
const KIND_STATIC: usize = 0b10;
const KIND_VEC: usize = 0b11;
const KIND_MASK: usize = 0b11;
const KIND_UNMASK: usize = !KIND_MASK;

const MIN_NON_ZERO_CAP: usize = 64;
const SHARED_VEC_SIZE: usize = mem::size_of::<SharedVec>();

// Bit op constants for extracting the inline length value from the `arc` field.
const INLINE_LEN_MASK: usize = 0b1111_1100;
const INLINE_LEN_OFFSET: usize = 2;

// Byte offset from the start of `Inner` to where the inline buffer data
// starts. On little endian platforms, the first bytes of the struct hold the
// storage flag and inline length, so the data is shifted by two bytes. On big
// endian systems, the data starts at the beginning of the struct.
#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 2;
#[cfg(target_endian = "big")]
const INLINE_DATA_OFFSET: isize = 0;

// Inline buffer capacity. This is the size of `Inner` minus 2 bytes for the
// metadata.
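// On 64 bit targets `Inner` is 4 * 8 = 32 bytes, so the inline buffer holds
// 32 - 2 = 30 bytes; on 32 bit targets it is 16 bytes, holding 16 - 2 = 14.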
#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 2;
#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 2;

/*
 *
 * ===== Bytes =====
 *
 */

impl Bytes {
    /// Creates a new empty `Bytes`.
    ///
    /// This will not allocate and the returned `Bytes` handle will be empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert_eq!(&b[..], b"");
    /// ```
    #[inline]
    pub const fn new() -> Bytes {
        Bytes {
            inner: Inner::empty_inline(),
        }
    }

    /// Creates a new `Bytes` from a static slice.
    ///
    /// The returned `Bytes` will point directly to the static slice. There is
    /// no allocating or copying.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let b = Bytes::from_static(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    #[inline]
    pub const fn from_static(bytes: &'static [u8]) -> Bytes {
        Bytes {
            inner: Inner::from_static(bytes),
        }
    }

    /// Returns the number of bytes contained in this `Bytes`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let b = Bytes::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the `Bytes` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let b = Bytes::new();
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Returns true if the `Bytes` uses inline allocation.
    ///
    /// # Examples
    /// ```
    /// use mco_redis::bytes::{Bytes, BytesMut};
    ///
    /// assert!(Bytes::from(BytesMut::from(&[0, 0, 0, 0][..])).is_inline());
    /// assert!(Bytes::from(Vec::with_capacity(4)).is_inline());
    /// assert!(!Bytes::from(&[0; 1024][..]).is_inline());
    /// ```
    pub fn is_inline(&self) -> bool {
        self.inner.is_inline()
    }

    /// Creates a new `Bytes` instance from a slice, by copying it.
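    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let b = Bytes::copy_from_slice(b"hello");
    /// assert_eq!(&b[..], b"hello");
    /// ```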
    pub fn copy_from_slice(data: &[u8]) -> Self {
        Self::copy_from_slice_in(data, PoolId::DEFAULT)
    }

    /// Creates a new `Bytes` instance from a slice, by copying it into the
    /// specified memory pool.
    pub fn copy_from_slice_in<T>(data: &[u8], pool: T) -> Self
    where
        PoolRef: From<T>,
    {
        if data.len() <= INLINE_CAP {
            Bytes {
                inner: Inner::from_slice_inline(data),
            }
        } else {
            Bytes {
                inner: Inner::from_slice(data.len(), data, pool.into()),
            }
        }
    }

    /// Returns a slice of self for the provided range.
    ///
    /// This will increment the reference count for the underlying memory and
    /// return a new `Bytes` handle set to the slice.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let a = Bytes::from(&b"hello world"[..]);
    /// let b = a.slice(2..5);
    ///
    /// assert_eq!(&b[..], b"llo");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
    /// will panic.
    pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
        use std::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n + 1,
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(begin <= end);
        assert!(end <= len);

        if end - begin <= INLINE_CAP {
            Bytes {
                inner: Inner::from_slice_inline(&self[begin..end]),
            }
        } else {
            let mut ret = self.clone();

            unsafe {
                ret.inner.set_end(end);
                ret.inner.set_start(begin);
            }
            ret
        }
    }

    /// Returns a slice of self that is equivalent to the given `subset`.
    ///
    /// When processing a `Bytes` buffer with other tools, one often gets a
    /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
    /// This function turns that `&[u8]` into another `Bytes`, as if one had
    /// called `self.slice()` with the offsets that correspond to `subset`.
    ///
    /// This operation is `O(1)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let bytes = Bytes::from(&b"012345678"[..]);
    /// let as_slice = bytes.as_ref();
    /// let subset = &as_slice[2..6];
    /// let subslice = bytes.slice_ref(&subset);
    /// assert_eq!(&subslice[..], b"2345");
    /// ```
    ///
    /// # Panics
    ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
    pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
        let bytes_p = self.as_ptr() as usize;
        let bytes_len = self.len();

        let sub_p = subset.as_ptr() as usize;
        let sub_len = subset.len();

        assert!(sub_p >= bytes_p);
        assert!(sub_p + sub_len <= bytes_p + bytes_len);

        let sub_offset = sub_p - bytes_p;

        self.slice(sub_offset..(sub_offset + sub_len))
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
    /// contains elements `[at, len)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_off(5);
    ///
    /// assert_eq!(&a[..], b"hello");
    /// assert_eq!(&b[..], b" world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn split_off(&mut self, at: usize) -> Bytes {
        assert!(at <= self.len());

        if at == self.len() {
            return Bytes::new();
        }

        if at == 0 {
            mem::replace(self, Bytes::new())
        } else {
            Bytes {
                inner: self.inner.split_off(at, true),
            }
        }
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned
    /// `Bytes` contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let mut a = Bytes::from(&b"hello world"[..]);
    /// let b = a.split_to(5);
    ///
    /// assert_eq!(&a[..], b" world");
    /// assert_eq!(&b[..], b"hello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn split_to(&mut self, at: usize) -> Bytes {
        assert!(at <= self.len());

        if at == self.len() {
            return mem::replace(self, Bytes::new());
        }

        if at == 0 {
            Bytes::new()
        } else {
            Bytes {
                inner: self.inner.split_to(at, true),
            }
        }
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    #[inline]
    pub fn truncate(&mut self, len: usize) {
        self.inner.truncate(len, true);
    }

    /// Shrinks the buffer's backing storage to fit its current contents,
    /// dropping excess capacity.
    ///
    /// This is useful if the underlying buffer is larger than the current
    /// bytes object.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.trimdown();
    /// assert_eq!(buf, b"hello world"[..]);
    /// ```
    #[inline]
    pub fn trimdown(&mut self) {
        let kind = self.inner.kind();

        // trim down only if the buffer is not inline or static and the
        // buffer's unused space is at least 64 bytes
        if !(kind == KIND_INLINE || kind == KIND_STATIC) {
            if self.inner.len() <= INLINE_CAP {
                *self = Bytes {
                    inner: Inner::from_slice_inline(self),
                };
            } else if self.inner.capacity() - self.inner.len() >= 64 {
                *self = Bytes {
                    inner: Inner::from_slice(self.len(), self, self.inner.pool()),
                }
            }
        }
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let mut buf = Bytes::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    #[inline]
    pub fn clear(&mut self) {
        self.inner = Inner::empty_inline();
    }

    /// Attempts to convert into a `BytesMut` handle.
    ///
    /// This will only succeed if there are no other outstanding references to
    /// the underlying chunk of memory. `Bytes` handles that contain inlined
    /// bytes will always be convertible to `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let a = Bytes::copy_from_slice(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
    ///
    /// // Create a shallow clone
    /// let b = a.clone();
    ///
    /// // This will fail because `b` shares a reference with `a`
    /// let a = a.try_mut().unwrap_err();
    ///
    /// drop(b);
    ///
    /// // This will succeed
    /// let mut a = a.try_mut().unwrap();
    ///
    /// a[0] = b'b';
    ///
    /// assert_eq!(&a[..4], b"bary");
    /// ```
    pub fn try_mut(self) -> Result<BytesMut, Bytes> {
        if self.inner.is_mut_safe() {
            Ok(BytesMut { inner: self.inner })
        } else {
            Err(self)
        }
    }

    /// Returns an iterator over the bytes contained by the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{Buf, Bytes};
    ///
    /// let buf = Bytes::from(&b"abc"[..]);
    /// let mut iter = buf.iter();
    ///
    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
    /// assert_eq!(iter.next(), None);
    /// ```
    pub fn iter(&self) -> std::slice::Iter<'_, u8> {
        self.chunk().iter()
    }
}

impl Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.inner.as_ref()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.inner.as_ref().len(),
            "cannot advance past `remaining`"
        );
        unsafe {
            self.inner.set_start(cnt);
        }
    }
}

impl bytes::buf::Buf for Bytes {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.inner.as_ref()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.inner.as_ref().len(),
            "cannot advance past `remaining`"
        );
        unsafe {
            self.inner.set_start(cnt);
        }
    }
}

impl Clone for Bytes {
    fn clone(&self) -> Bytes {
        Bytes {
            inner: unsafe { self.inner.shallow_clone() },
        }
    }
}

impl AsRef<[u8]> for Bytes {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

impl Deref for Bytes {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

impl From<BytesMut> for Bytes {
    fn from(src: BytesMut) -> Bytes {
        src.freeze()
    }
}

impl From<Vec<u8>> for Bytes {
    /// Convert a `Vec` into a `Bytes`.
    ///
    /// If the vector is small enough its contents are stored inline;
    /// otherwise the vector is moved into the `Bytes` without copying the
    /// data.
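    ///
    /// # Examples
    ///
    /// A short sketch of both paths (inline vs. heap):
    ///
    /// ```
    /// use mco_redis::bytes::Bytes;
    ///
    /// let small = Bytes::from(vec![1u8, 2, 3]);
    /// assert!(small.is_inline());
    ///
    /// let large = Bytes::from(vec![0u8; 64]);
    /// assert!(!large.is_inline());
    /// ```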
    fn from(src: Vec<u8>) -> Bytes {
        if src.is_empty() {
            Bytes::new()
        } else if src.len() <= INLINE_CAP {
            Bytes {
                inner: Inner::from_slice_inline(&src),
            }
        } else {
            BytesMut::from(src).freeze()
        }
    }
}

impl From<String> for Bytes {
    fn from(src: String) -> Bytes {
        if src.is_empty() {
            Bytes::new()
        } else if src.len() <= INLINE_CAP {
            Bytes {
                inner: Inner::from_slice_inline(src.as_bytes()),
            }
        } else {
            BytesMut::from(src).freeze()
        }
    }
}

impl From<&'static [u8]> for Bytes {
    fn from(src: &'static [u8]) -> Bytes {
        Bytes::from_static(src)
    }
}

impl From<&'static str> for Bytes {
    fn from(src: &'static str) -> Bytes {
        Bytes::from_static(src.as_bytes())
    }
}

impl FromIterator<u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter).freeze()
    }
}

impl<'a> FromIterator<&'a u8> for Bytes {
    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
        BytesMut::from_iter(into_iter).freeze()
    }
}

impl Eq for Bytes {}

impl PartialEq for Bytes {
    fn eq(&self, other: &Bytes) -> bool {
        self.inner.as_ref() == other.inner.as_ref()
    }
}

impl PartialOrd for Bytes {
    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
        self.inner.as_ref().partial_cmp(other.inner.as_ref())
    }
}

impl Ord for Bytes {
    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
        self.inner.as_ref().cmp(other.inner.as_ref())
    }
}

impl Default for Bytes {
    #[inline]
    fn default() -> Bytes {
        Bytes::new()
    }
}

impl fmt::Debug for Bytes {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
    }
}

impl hash::Hash for Bytes {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        let s: &[u8] = self.as_ref();
        s.hash(state);
    }
}

impl Borrow<[u8]> for Bytes {
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl IntoIterator for Bytes {
    type Item = u8;
    type IntoIter = IntoIter<Bytes>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a Bytes {
    type Item = &'a u8;
    type IntoIter = std::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().iter()
    }
}

/*
 *
 * ===== BytesMut =====
 *
 */

impl BytesMut {
    /// Creates a new `BytesMut` with the specified capacity.
    ///
    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
    /// without reallocating.
    ///
    /// It is important to note that this function does not specify the length
    /// of the returned `BytesMut`, but only the capacity.
    ///
    /// # Panics
    ///
    /// Panics if `capacity` does not fit in 60 bits on 64 bit systems
    /// (28 bits on 32 bit systems).
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::with_capacity(64);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> BytesMut {
        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
    }

    /// Creates a new `BytesMut` with the specified capacity and in the specified memory pool.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut, PoolId};
    ///
    /// let mut bytes = BytesMut::with_capacity_in(64, PoolId::P1);
    ///
    /// // `bytes` contains no data, even though there is capacity
    /// assert_eq!(bytes.len(), 0);
    ///
    /// bytes.put(&b"hello world"[..]);
    ///
    /// assert_eq!(&bytes[..], b"hello world");
    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
    /// ```
    #[inline]
    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesMut
    where
        PoolRef: From<T>,
    {
        BytesMut {
            inner: Inner::with_capacity(capacity, pool.into()),
        }
    }

    /// Creates a new `BytesMut` from a slice, by copying it.
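    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut b = BytesMut::copy_from_slice(&b"hello"[..]);
    /// b[0] = b'j';
    /// assert_eq!(&b[..], b"jello");
    /// ```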
    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
        Self::copy_from_slice_in(src, PoolId::DEFAULT)
    }

    /// Creates a new `BytesMut` from a slice, by copying it into the
    /// specified memory pool.
    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
    where
        T: AsRef<[u8]>,
        PoolRef: From<U>,
    {
        let s = src.as_ref();
        BytesMut {
            inner: Inner::from_slice(s.len(), s, pool.into()),
        }
    }

    #[inline]
    /// Convert a `Vec` into a `BytesMut`.
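    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, PoolId};
    ///
    /// let b = BytesMut::from_vec(vec![1u8, 2, 3], PoolId::DEFAULT);
    /// assert_eq!(&b[..], &[1, 2, 3][..]);
    /// ```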
    pub fn from_vec<T>(src: Vec<u8>, pool: T) -> BytesMut
    where
        PoolRef: From<T>,
    {
        BytesMut {
            inner: Inner::from_vec(src, pool.into()),
        }
    }

    /// Creates a new `BytesMut` with default capacity.
    ///
    /// The resulting object has length 0 and a small, unspecified capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut};
    ///
    /// let mut bytes = BytesMut::new();
    ///
    /// assert_eq!(0, bytes.len());
    ///
    /// bytes.reserve(2);
    /// bytes.put_slice(b"xy");
    ///
    /// assert_eq!(&b"xy"[..], &bytes[..]);
    /// ```
    #[inline]
    pub fn new() -> BytesMut {
        BytesMut::with_capacity(MIN_NON_ZERO_CAP)
    }

    /// Returns the number of bytes contained in this `BytesMut`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let b = BytesMut::from(&b"hello"[..]);
    /// assert_eq!(b.len(), 5);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the `BytesMut` has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert!(b.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Returns the number of bytes the `BytesMut` can hold without reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let b = BytesMut::with_capacity(64);
    /// assert_eq!(b.capacity(), 64);
    /// ```
    #[inline]
    pub fn capacity(&self) -> usize {
        self.inner.capacity()
    }

    /// Converts `self` into an immutable `Bytes`.
    ///
    /// The conversion is zero cost and is used to indicate that the slice
    /// referenced by the handle will no longer be mutated. Once the conversion
    /// is done, the handle can be cloned and shared across threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut};
    /// use std::thread;
    ///
    /// let mut b = BytesMut::with_capacity(64);
    /// b.put("hello world");
    /// let b1 = b.freeze();
    /// let b2 = b1.clone();
    ///
    /// let th = thread::spawn(move || {
    ///     assert_eq!(&b1[..], b"hello world");
    /// });
    ///
    /// assert_eq!(&b2[..], b"hello world");
    /// th.join().unwrap();
    /// ```
    #[inline]
    pub fn freeze(self) -> Bytes {
        if self.inner.len() <= INLINE_CAP {
            Bytes {
                inner: self.inner.to_inline(),
            }
        } else {
            Bytes { inner: self.inner }
        }
    }

    /// Splits the bytes into two at the given index.
    ///
    /// Afterwards `self` contains elements `[0, at)`, and the returned
    /// `BytesMut` contains elements `[at, capacity)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count
    /// and sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_off(5);
    ///
    /// a[0] = b'j';
    /// b[0] = b'!';
    ///
    /// assert_eq!(&a[..], b"jello");
    /// assert_eq!(&b[..], b"!world");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > capacity`.
    pub fn split_off(&mut self, at: usize) -> BytesMut {
        BytesMut {
            inner: self.inner.split_off(at, false),
        }
    }

    /// Removes the bytes from the current view, returning them in a new
    /// `BytesMut` handle.
    ///
    /// Afterwards, `self` will be empty, but will retain any additional
    /// capacity that it had before the operation. This is identical to
    /// `self.split_to(self.len())`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(1024);
    /// buf.put(&b"hello world"[..]);
    ///
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(1013, buf.capacity());
    ///
    /// assert_eq!(other, b"hello world"[..]);
    /// ```
    pub fn split(&mut self) -> BytesMut {
        let len = self.len();
        self.split_to(len)
    }

    /// Splits the buffer into two at the given index.
    ///
    /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
    /// contains elements `[0, at)`.
    ///
    /// This is an `O(1)` operation that just increases the reference count and
    /// sets a few indices.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut a = BytesMut::from(&b"hello world"[..]);
    /// let mut b = a.split_to(5);
    ///
    /// a[0] = b'!';
    /// b[0] = b'j';
    ///
    /// assert_eq!(&a[..], b"!world");
    /// assert_eq!(&b[..], b"jello");
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `at > len`.
    pub fn split_to(&mut self, at: usize) -> BytesMut {
        assert!(at <= self.len());

        BytesMut {
            inner: self.inner.split_to(at, false),
        }
    }

    /// Shortens the buffer, keeping the first `len` bytes and dropping the
    /// rest.
    ///
    /// If `len` is greater than the buffer's current length, this has no
    /// effect.
    ///
    /// The [`split_off`] method can emulate `truncate`, but this causes the
    /// excess bytes to be returned instead of dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.truncate(5);
    /// assert_eq!(buf, b"hello"[..]);
    /// ```
    ///
    /// [`split_off`]: #method.split_off
    pub fn truncate(&mut self, len: usize) {
        self.inner.truncate(len, false);
    }

    /// Clears the buffer, removing all data.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello world"[..]);
    /// buf.clear();
    /// assert!(buf.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.truncate(0);
    }

    /// Resizes the buffer so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the buffer is extended by the
    /// difference with each additional byte set to `value`. If `new_len` is
    /// less than `len`, the buffer is simply truncated.
    ///
    /// # Panics
    ///
    /// Panics if `new_len` does not fit in 60 bits on 64 bit systems
    /// (28 bits on 32 bit systems).
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::new();
    ///
    /// buf.resize(3, 0x1);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
    ///
    /// buf.resize(2, 0x2);
    /// assert_eq!(&buf[..], &[0x1, 0x1]);
    ///
    /// buf.resize(4, 0x3);
    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
    /// ```
    #[inline]
    pub fn resize(&mut self, new_len: usize, value: u8) {
        self.inner.resize(new_len, value);
    }

    /// Sets the length of the buffer.
    ///
    /// This will explicitly set the size of the buffer without actually
    /// modifying the data, so it is up to the caller to ensure that the data
    /// has been initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut b = BytesMut::from(&b"hello world"[..]);
    ///
    /// unsafe {
    ///     b.set_len(5);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello");
    ///
    /// unsafe {
    ///     b.set_len(11);
    /// }
    ///
    /// assert_eq!(&b[..], b"hello world");
    /// ```
    ///
    /// # Panics
    ///
    /// This method will panic if `len` is out of bounds for the underlying
    /// slice or if it comes after the `end` of the configured window.
    #[inline]
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn set_len(&mut self, len: usize) {
        self.inner.set_len(len)
    }

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// into the given `BytesMut`.
    ///
    /// More than `additional` bytes may be reserved in order to avoid frequent
    /// reallocations. A call to `reserve` may result in an allocation.
    ///
    /// Before allocating new buffer space, the function will attempt to reclaim
    /// space in the existing buffer. If the current handle references a small
    /// view in the original buffer and all other handles have been dropped,
    /// and the requested capacity is less than or equal to the existing
    /// buffer's capacity, then the current view will be copied to the front of
    /// the buffer and the handle will take ownership of the full buffer.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity does not fit in 60 bits on 64 bit systems
    /// (28 bits on 32 bit systems), or if it overflows `usize`.
    ///
    /// # Examples
    ///
    /// In the following example, a new buffer is allocated.
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::from(&b"hello"[..]);
    /// buf.reserve(64);
    /// assert!(buf.capacity() >= 69);
    /// ```
    ///
    /// In the following example, the existing buffer is reclaimed.
    ///
    /// ```
    /// use mco_redis::bytes::{BytesMut, BufMut};
    ///
    /// let mut buf = BytesMut::with_capacity(128);
    /// buf.put(&[0; 64][..]);
    ///
    /// let ptr = buf.as_ptr();
    /// let other = buf.split();
    ///
    /// assert!(buf.is_empty());
    /// assert_eq!(buf.capacity(), 64);
    ///
    /// drop(other);
    /// buf.reserve(128);
    ///
    /// assert_eq!(buf.capacity(), 128);
    /// assert_eq!(buf.as_ptr(), ptr);
    /// ```
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;

        if additional <= rem {
            // The handle can already store at least `additional` more bytes, so
            // there is no further work needed to be done.
            return;
        }

        self.inner.reserve_inner(additional);
    }

    /// Appends the given bytes to this object.
    ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first, so `extend_from_slice` never panics due to insufficient
    /// capacity.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::BytesMut;
    ///
    /// let mut buf = BytesMut::with_capacity(0);
    /// buf.extend_from_slice(b"aaabbb");
    /// buf.extend_from_slice(b"cccddd");
    ///
    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
    /// ```
    #[inline]
    pub fn extend_from_slice(&mut self, extend: &[u8]) {
        self.put_slice(extend);
    }

    /// Returns an iterator over the bytes contained by the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use mco_redis::bytes::{Buf, BytesMut};
    ///
    /// let buf = BytesMut::from(&b"abc"[..]);
    /// let mut iter = buf.iter();
    ///
    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
    /// assert_eq!(iter.next(), None);
    /// ```
    #[inline]
    pub fn iter(&self) -> std::slice::Iter<'_, u8> {
        self.chunk().iter()
    }

    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
        self.inner.move_to_pool(pool);
    }
}

impl Buf for BytesMut {
    #[inline]
    fn remaining(&self) -> usize {
        self.len()
    }

    #[inline]
    fn chunk(&self) -> &[u8] {
        self.inner.as_ref()
    }

    #[inline]
    fn advance(&mut self, cnt: usize) {
        assert!(
            cnt <= self.inner.as_ref().len(),
            "cannot advance past `remaining`"
        );
        unsafe {
            self.inner.set_start(cnt);
        }
    }
}

impl BufMut for BytesMut {
    #[inline]
    fn remaining_mut(&self) -> usize {
        self.capacity() - self.len()
    }

    #[inline]
    unsafe fn advance_mut(&mut self, cnt: usize) {
        let new_len = self.len() + cnt;

        // This call will panic if `cnt` is too big
        self.inner.set_len(new_len);
    }

    #[inline]
    fn chunk_mut(&mut self) -> &mut UninitSlice {
        let len = self.len();

        unsafe {
            // This will never panic as `len` can never become invalid
            let ptr = &mut self.inner.as_raw()[len..];

            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
        }
    }

    #[inline]
    fn put_slice(&mut self, src: &[u8]) {
        let len = src.len();
        self.reserve(len);

        unsafe {
            ptr::copy_nonoverlapping(
                src.as_ptr(),
                self.chunk_mut().as_mut_ptr() as *mut u8,
                len,
            );
            self.advance_mut(len);
        }
    }

    #[inline]
    fn put_u8(&mut self, n: u8) {
        self.reserve(1);
        self.inner.put_u8(n);
    }

    #[inline]
    fn put_i8(&mut self, n: i8) {
        self.reserve(1);
        self.put_u8(n as u8);
    }
}

impl AsRef<[u8]> for BytesMut {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

impl AsMut<[u8]> for BytesMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.as_mut()
    }
}

impl Deref for BytesMut {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.as_ref()
    }
}

impl DerefMut for BytesMut {
    #[inline]
    fn deref_mut(&mut self) -> &mut [u8] {
        self.inner.as_mut()
    }
}

impl From<Vec<u8>> for BytesMut {
    #[inline]
    /// Convert a `Vec` into a `BytesMut`.
    ///
    /// A `BytesMut` constructed this way always stores its data on the heap.
    fn from(src: Vec<u8>) -> BytesMut {
        BytesMut::from_vec(src, PoolId::DEFAULT.pool_ref())
    }
}

impl From<String> for BytesMut {
    #[inline]
    fn from(src: String) -> BytesMut {
        BytesMut::from_vec(src.into_bytes(), PoolId::DEFAULT.pool_ref())
    }
}

impl<'a> From<&'a [u8]> for BytesMut {
    fn from(src: &'a [u8]) -> BytesMut {
        let len = src.len();

        if len == 0 {
            BytesMut::new()
        } else {
            BytesMut::copy_from_slice_in(src, PoolId::DEFAULT.pool_ref())
        }
    }
}

impl<'a> From<&'a str> for BytesMut {
    #[inline]
    fn from(src: &'a str) -> BytesMut {
        BytesMut::from(src.as_bytes())
    }
}

impl From<Bytes> for BytesMut {
    #[inline]
    fn from(src: Bytes) -> BytesMut {
        src.try_mut()
            .unwrap_or_else(|src| BytesMut::copy_from_slice_in(&src[..], src.inner.pool()))
    }
}

impl Eq for BytesMut {}

impl PartialEq for BytesMut {
    #[inline]
    fn eq(&self, other: &BytesMut) -> bool {
        self.inner.as_ref() == other.inner.as_ref()
    }
}

impl Default for BytesMut {
    #[inline]
    fn default() -> BytesMut {
        BytesMut::new()
    }
}

impl Borrow<[u8]> for BytesMut {
    #[inline]
    fn borrow(&self) -> &[u8] {
        self.as_ref()
    }
}

impl BorrowMut<[u8]> for BytesMut {
    #[inline]
    fn borrow_mut(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl fmt::Debug for BytesMut {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
    }
}

impl fmt::Write for BytesMut {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        if self.remaining_mut() >= s.len() {
            self.put_slice(s.as_bytes());
            Ok(())
        } else {
            Err(fmt::Error)
        }
    }

    #[inline]
    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        fmt::write(self, args)
    }
}
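
// A usage sketch for the `fmt::Write` impl above: `write!` fails, rather
// than grows the buffer, once formatted output would exceed the remaining
// capacity.
//
//     use std::fmt::Write;
//
//     let mut buf = BytesMut::with_capacity(64);
//     write!(buf, "{}-{}", 1, 2).unwrap();
//     assert_eq!(&buf[..], b"1-2");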

impl Clone for BytesMut {
    #[inline]
    fn clone(&self) -> BytesMut {
        BytesMut {
            inner: unsafe { self.inner.shallow_clone() },
        }
    }
}

impl IntoIterator for BytesMut {
    type Item = u8;
    type IntoIter = IntoIter<BytesMut>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}

impl<'a> IntoIterator for &'a BytesMut {
    type Item = &'a u8;
    type IntoIter = std::slice::Iter<'a, u8>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().iter()
    }
}

impl FromIterator<u8> for BytesMut {
    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
        let iter = into_iter.into_iter();
        let (min, maybe_max) = iter.size_hint();

        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
        for i in iter {
            out.reserve(1);
            out.put_u8(i);
        }

        out
    }
}
1795
1796impl<'a> FromIterator<&'a u8> for BytesMut {
1797    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1798        into_iter.into_iter().copied().collect::<BytesMut>()
1799    }
1800}
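
// NOTE: an illustrative sketch, not part of the original source. The
// `FromIterator` impls above allow collecting any byte iterator directly
// into a `BytesMut`:
//
//     let owned: BytesMut = (0u8..4).collect();
//     assert_eq!(&owned[..], &[0, 1, 2, 3]);
//
//     let copied: BytesMut = b"abc".iter().collect();
//     assert_eq!(&copied[..], b"abc");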
1801
1802impl Extend<u8> for BytesMut {
1803    fn extend<T>(&mut self, iter: T)
1804    where
1805        T: IntoIterator<Item = u8>,
1806    {
1807        let iter = iter.into_iter();
1808
1809        let (lower, _) = iter.size_hint();
1810        self.reserve(lower);
1811
1812        for b in iter {
1813            self.put_u8(b);
1814        }
1815    }
1816}
1817
1818impl<'a> Extend<&'a u8> for BytesMut {
1819    fn extend<T>(&mut self, iter: T)
1820    where
1821        T: IntoIterator<Item = &'a u8>,
1822    {
1823        self.extend(iter.into_iter().copied())
1824    }
1825}
1826
1827/*
1828 *
1829 * ===== BytesVec =====
1830 *
1831 */
1832
1833impl BytesVec {
1834    /// Creates a new `BytesVec` with the specified capacity.
1835    ///
1836    /// The returned `BytesVec` will be able to hold at least `capacity` bytes
1837    /// without reallocating.
1838    ///
1839    /// It is important to note that this function does not specify the length
1840    /// of the returned `BytesVec`, but only the capacity.
1841    ///
1842    /// # Panics
1843    ///
1844    /// Panics if `capacity` does not fit in 60 bits on 64-bit systems
1845    /// (28 bits on 32-bit systems).
1846    ///
1847    /// # Examples
1848    ///
1849    /// ```
1850    /// use mco_redis::bytes::{BytesVec, BufMut};
1851    ///
1852    /// let mut bytes = BytesVec::with_capacity(64);
1853    ///
1854    /// // `bytes` contains no data, even though there is capacity
1855    /// assert_eq!(bytes.len(), 0);
1856    ///
1857    /// bytes.put(&b"hello world"[..]);
1858    ///
1859    /// assert_eq!(&bytes[..], b"hello world");
1860    /// ```
1861    #[inline]
1862    pub fn with_capacity(capacity: usize) -> BytesVec {
1863        Self::with_capacity_in(capacity, PoolId::DEFAULT.pool_ref())
1864    }
1865
1866    /// Creates a new `BytesVec` with the specified capacity, allocated in the specified memory pool.
1867    ///
1868    /// # Examples
1869    ///
1870    /// ```
1871    /// use mco_redis::bytes::{BytesVec, BufMut, PoolId};
1872    ///
1873    /// let mut bytes = BytesVec::with_capacity_in(64, PoolId::P1);
1874    ///
1875    /// // `bytes` contains no data, even though there is capacity
1876    /// assert_eq!(bytes.len(), 0);
1877    ///
1878    /// bytes.put(&b"hello world"[..]);
1879    ///
1880    /// assert_eq!(&bytes[..], b"hello world");
1881    /// assert!(PoolId::P1.pool_ref().allocated() > 0);
1882    /// ```
1883    #[inline]
1884    pub fn with_capacity_in<T>(capacity: usize, pool: T) -> BytesVec
1885    where
1886        PoolRef: From<T>,
1887    {
1888        BytesVec {
1889            inner: InnerVec::with_capacity(capacity, pool.into()),
1890        }
1891    }
1892
1893    /// Creates a new `BytesVec` from a slice, copying the data.
1894    pub fn copy_from_slice<T: AsRef<[u8]>>(src: T) -> Self {
1895        Self::copy_from_slice_in(src, PoolId::DEFAULT)
1896    }
1897
1898    /// Creates a new `BytesVec` from a slice in the specified memory pool, copying the data.
1899    pub fn copy_from_slice_in<T, U>(src: T, pool: U) -> Self
1900    where
1901        T: AsRef<[u8]>,
1902        PoolRef: From<U>,
1903    {
1904        let s = src.as_ref();
1905        BytesVec {
1906            inner: InnerVec::from_slice(s.len(), s, pool.into()),
1907        }
1908    }
1909
1910    /// Creates a new `BytesVec` with a default capacity.
1911    ///
1912    /// The resulting object has length 0 and a small, unspecified capacity;
1913    /// the backing storage is allocated up front.
1914    ///
1915    /// # Examples
1916    ///
1917    /// ```
1918    /// use mco_redis::bytes::{BytesVec, BufMut};
1919    ///
1920    /// let mut bytes = BytesVec::new();
1921    ///
1922    /// assert_eq!(0, bytes.len());
1923    ///
1924    /// bytes.reserve(2);
1925    /// bytes.put_slice(b"xy");
1926    ///
1927    /// assert_eq!(&b"xy"[..], &bytes[..]);
1928    /// ```
1929    #[inline]
1930    pub fn new() -> BytesVec {
1931        BytesVec::with_capacity(MIN_NON_ZERO_CAP)
1932    }
1933
1934    /// Returns the number of bytes contained in this `BytesVec`.
1935    ///
1936    /// # Examples
1937    ///
1938    /// ```
1939    /// use mco_redis::bytes::BytesVec;
1940    ///
1941    /// let b = BytesVec::copy_from_slice(&b"hello"[..]);
1942    /// assert_eq!(b.len(), 5);
1943    /// ```
1944    #[inline]
1945    pub fn len(&self) -> usize {
1946        self.inner.len()
1947    }
1948
1949    /// Returns true if the `BytesVec` has a length of 0.
1950    ///
1951    /// # Examples
1952    ///
1953    /// ```
1954    /// use mco_redis::bytes::BytesVec;
1955    ///
1956    /// let b = BytesVec::with_capacity(64);
1957    /// assert!(b.is_empty());
1958    /// ```
1959    #[inline]
1960    pub fn is_empty(&self) -> bool {
1961        self.inner.len() == 0
1962    }
1963
1964    /// Returns the number of bytes the `BytesVec` can hold without reallocating.
1965    ///
1966    /// # Examples
1967    ///
1968    /// ```
1969    /// use mco_redis::bytes::BytesVec;
1970    ///
1971    /// let b = BytesVec::with_capacity(64);
1972    /// assert_eq!(b.capacity(), 64);
1973    /// ```
1974    #[inline]
1975    pub fn capacity(&self) -> usize {
1976        self.inner.capacity()
1977    }
1978
1979    /// Converts `self` into an immutable `Bytes`.
1980    ///
1981    /// The conversion is zero cost and is used to indicate that the slice
1982    /// referenced by the handle will no longer be mutated. Once the conversion
1983    /// is done, the handle can be cloned and shared across threads.
1984    ///
1985    /// # Examples
1986    ///
1987    /// ```
1988    /// use mco_redis::bytes::{BytesVec, BufMut};
1989    /// use std::thread;
1990    ///
1991    /// let mut b = BytesVec::with_capacity(64);
1992    /// b.put("hello world");
1993    /// let b1 = b.freeze();
1994    /// let b2 = b1.clone();
1995    ///
1996    /// let th = thread::spawn(move || {
1997    ///     assert_eq!(&b1[..], b"hello world");
1998    /// });
1999    ///
2000    /// assert_eq!(&b2[..], b"hello world");
2001    /// th.join().unwrap();
2002    /// ```
2003    #[inline]
2004    pub fn freeze(self) -> Bytes {
2005        Bytes {
2006            inner: self.inner.into_inner(),
2007        }
2008    }
2009
2010    /// Removes the bytes from the current view, returning them in a new
2011    /// `BytesMut` instance.
2012    ///
2013    /// Afterwards, `self` will be empty, but will retain any additional
2014    /// capacity that it had before the operation. This is identical to
2015    /// `self.split_to(self.len())`.
2016    ///
2017    /// This is an `O(1)` operation that just increases the reference count and
2018    /// sets a few indices.
2019    ///
2020    /// # Examples
2021    ///
2022    /// ```
2023    /// use mco_redis::bytes::{BytesVec, BufMut};
2024    ///
2025    /// let mut buf = BytesVec::with_capacity(1024);
2026    /// buf.put(&b"hello world"[..]);
2027    ///
2028    /// let other = buf.split();
2029    ///
2030    /// assert!(buf.is_empty());
2031    /// assert_eq!(1013, buf.capacity());
2032    ///
2033    /// assert_eq!(other, b"hello world"[..]);
2034    /// ```
2035    pub fn split(&mut self) -> BytesMut {
2036        self.split_to(self.len())
2037    }
2038
2039    /// Splits the buffer into two at the given index.
2040    ///
2041    /// Afterwards `self` contains elements `[at, len)`, and the returned
2042    /// `BytesMut` contains elements `[0, at)`.
2043    ///
2044    /// This is an `O(1)` operation that just increases the reference count and
2045    /// sets a few indices.
2046    ///
2047    /// # Examples
2048    ///
2049    /// ```
2050    /// use mco_redis::bytes::BytesVec;
2051    ///
2052    /// let mut a = BytesVec::copy_from_slice(&b"hello world"[..]);
2053    /// let mut b = a.split_to(5);
2054    ///
2055    /// a[0] = b'!';
2056    ///
2057    /// assert_eq!(&a[..], b"!world");
2058    /// assert_eq!(&b[..], b"hello");
2059    /// ```
2060    ///
2061    /// # Panics
2062    ///
2063    /// Panics if `at > len`.
2064    pub fn split_to(&mut self, at: usize) -> BytesMut {
2065        assert!(at <= self.len());
2066
2067        BytesMut {
2068            inner: self.inner.split_to(at, false),
2069        }
2070    }
2071
2072    /// Shortens the buffer, keeping the first `len` bytes and dropping the
2073    /// rest.
2074    ///
2075    /// If `len` is greater than the buffer's current length, this has no
2076    /// effect.
2077    ///
2078    /// The [`split_off`] method on `BytesMut` can emulate `truncate`, but this
2079    /// causes the excess bytes to be returned instead of dropped.
2080    ///
2081    /// # Examples
2082    ///
2083    /// ```
2084    /// use mco_redis::bytes::BytesVec;
2085    ///
2086    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2087    /// buf.truncate(5);
2088    /// assert_eq!(buf, b"hello"[..]);
2089    /// ```
2090    ///
2091    /// [`split_off`]: BytesMut::split_off
2092    pub fn truncate(&mut self, len: usize) {
2093        self.inner.truncate(len);
2094    }
2095
2096    /// Clears the buffer, removing all data.
2097    ///
2098    /// # Examples
2099    ///
2100    /// ```
2101    /// use mco_redis::bytes::BytesVec;
2102    ///
2103    /// let mut buf = BytesVec::copy_from_slice(&b"hello world"[..]);
2104    /// buf.clear();
2105    /// assert!(buf.is_empty());
2106    /// ```
2107    pub fn clear(&mut self) {
2108        self.truncate(0);
2109    }
2110
2111    /// Resizes the buffer so that `len` is equal to `new_len`.
2112    ///
2113    /// If `new_len` is greater than `len`, the buffer is extended by the
2114    /// difference with each additional byte set to `value`. If `new_len` is
2115    /// less than `len`, the buffer is simply truncated.
2116    ///
2117    /// # Panics
2118    ///
2119    /// Panics if `new_len` does not fit in 60 bits on 64-bit systems
2120    /// (28 bits on 32-bit systems).
2121    ///
2122    /// # Examples
2123    ///
2124    /// ```
2125    /// use mco_redis::bytes::BytesVec;
2126    ///
2127    /// let mut buf = BytesVec::new();
2128    ///
2129    /// buf.resize(3, 0x1);
2130    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
2131    ///
2132    /// buf.resize(2, 0x2);
2133    /// assert_eq!(&buf[..], &[0x1, 0x1]);
2134    ///
2135    /// buf.resize(4, 0x3);
2136    /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
2137    /// ```
2138    #[inline]
2139    pub fn resize(&mut self, new_len: usize, value: u8) {
2140        self.inner.resize(new_len, value);
2141    }
2142
2143    /// Sets the length of the buffer.
2144    ///
2145    /// This will explicitly set the size of the buffer without actually
2146    /// modifying the data, so it is up to the caller to ensure that the data
2147    /// has been initialized.
2148    ///
2149    /// # Examples
2150    ///
2151    /// ```
2152    /// use mco_redis::bytes::BytesVec;
2153    ///
2154    /// let mut b = BytesVec::copy_from_slice(&b"hello world"[..]);
2155    ///
2156    /// unsafe {
2157    ///     b.set_len(5);
2158    /// }
2159    ///
2160    /// assert_eq!(&b[..], b"hello");
2161    ///
2162    /// unsafe {
2163    ///     b.set_len(11);
2164    /// }
2165    ///
2166    /// assert_eq!(&b[..], b"hello world");
2167    /// ```
2168    ///
2169    /// # Panics
2170    ///
2171    /// This method will panic if `len` is out of bounds for the underlying
2172    /// buffer's capacity.
2173    #[inline]
2174    #[allow(clippy::missing_safety_doc)]
2175    pub unsafe fn set_len(&mut self, len: usize) {
2176        self.inner.set_len(len)
2177    }
2178
2179    /// Reserves capacity for at least `additional` more bytes to be inserted
2180    /// into the given `BytesVec`.
2181    ///
2182    /// More than `additional` bytes may be reserved in order to avoid frequent
2183    /// reallocations. A call to `reserve` may result in an allocation.
2184    ///
2185    /// Before allocating new buffer space, the function will attempt to reclaim
2186    /// space in the existing buffer. If the current handle references a small
2187    /// view in the original buffer and all other handles have been dropped,
2188    /// and the requested capacity is less than or equal to the existing
2189    /// buffer's capacity, then the current view will be copied to the front of
2190    /// the buffer and the handle will take ownership of the full buffer.
2191    ///
2192    /// # Panics
2193    ///
2194    /// Panics if the new capacity does not fit in 60 bits on 64-bit systems
2195    /// (28 bits on 32-bit systems), or if it overflows `usize`.
2196    ///
2197    /// # Examples
2198    ///
2199    /// In the following example, a new buffer is allocated.
2200    ///
2201    /// ```
2202    /// use mco_redis::bytes::BytesVec;
2203    ///
2204    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
2205    /// buf.reserve(64);
2206    /// assert!(buf.capacity() >= 69);
2207    /// ```
2208    ///
2209    /// In the following example, the existing buffer is reclaimed.
2210    ///
2211    /// ```
2212    /// use mco_redis::bytes::{BytesVec, BufMut};
2213    ///
2214    /// let mut buf = BytesVec::with_capacity(128);
2215    /// buf.put(&[0; 64][..]);
2216    ///
2217    /// let ptr = buf.as_ptr();
2218    /// let other = buf.split();
2219    ///
2220    /// assert!(buf.is_empty());
2221    /// assert_eq!(buf.capacity(), 64);
2222    ///
2223    /// drop(other);
2224    /// buf.reserve(128);
2225    ///
2226    /// assert_eq!(buf.capacity(), 128);
2227    /// assert_eq!(buf.as_ptr(), ptr);
2228    /// ```
2233    #[inline]
2234    pub fn reserve(&mut self, additional: usize) {
2235        let len = self.len();
2236        let rem = self.capacity() - len;
2237
2238        if additional <= rem {
2239            // The handle can already store at least `additional` more bytes, so
2240            // there is no further work needed to be done.
2241            return;
2242        }
2243
2244        self.inner.reserve_inner(additional);
2245    }
2246
2247    /// Appends given bytes to this object.
2248    ///
2249    /// If this `BytesVec` object does not have enough capacity, it is resized first,
2250    /// so unlike a bare `put_slice`, `extend_from_slice` does not panic.
2251    ///
2252    /// # Examples
2253    ///
2254    /// ```
2255    /// use mco_redis::bytes::BytesVec;
2256    ///
2257    /// let mut buf = BytesVec::with_capacity(0);
2258    /// buf.extend_from_slice(b"aaabbb");
2259    /// buf.extend_from_slice(b"cccddd");
2260    ///
2261    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
2262    /// ```
2263    #[inline]
2264    pub fn extend_from_slice(&mut self, extend: &[u8]) {
2265        self.put_slice(extend);
2266    }
2267
2268    /// Runs the provided function with a `BytesMut` instance that contains the current data.
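    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use mco_redis::bytes::BytesVec;
    ///
    /// let mut buf = BytesVec::copy_from_slice(&b"hello"[..]);
    /// let len = buf.with_bytes_mut(|b| {
    ///     b.extend_from_slice(b" world");
    ///     b.len()
    /// });
    /// assert_eq!(len, 11);
    /// assert_eq!(&buf[..], b"hello world");
    /// ```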
2269    #[inline]
2270    pub fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2271    where
2272        F: FnOnce(&mut BytesMut) -> R,
2273    {
2274        self.inner.with_bytes_mut(f)
2275    }
2276
2277    /// Returns an iterator over the bytes contained by the buffer.
2278    ///
2279    /// # Examples
2280    ///
2281    /// ```
2282    /// use mco_redis::bytes::{Buf, BytesVec};
2283    ///
2284    /// let buf = BytesVec::copy_from_slice(&b"abc"[..]);
2285    /// let mut iter = buf.iter();
2286    ///
2287    /// assert_eq!(iter.next().map(|b| *b), Some(b'a'));
2288    /// assert_eq!(iter.next().map(|b| *b), Some(b'b'));
2289    /// assert_eq!(iter.next().map(|b| *b), Some(b'c'));
2290    /// assert_eq!(iter.next(), None);
2291    /// ```
2292    #[inline]
2293    pub fn iter(&'_ self) -> std::slice::Iter<'_, u8> {
2294        self.chunk().iter()
2295    }
2296
2297    pub(crate) fn move_to_pool(&mut self, pool: PoolRef) {
2298        self.inner.move_to_pool(pool);
2299    }
2300}
2301
2302impl Buf for BytesVec {
2303    #[inline]
2304    fn remaining(&self) -> usize {
2305        self.len()
2306    }
2307
2308    #[inline]
2309    fn chunk(&self) -> &[u8] {
2310        self.inner.as_ref()
2311    }
2312
2313    #[inline]
2314    fn advance(&mut self, cnt: usize) {
2315        assert!(
2316            cnt <= self.inner.as_ref().len(),
2317            "cannot advance past `remaining`"
2318        );
2319        unsafe {
2320            self.inner.set_start(cnt as u32);
2321        }
2322    }
2323}
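
// NOTE: an illustrative sketch, not part of the original source. `advance`
// consumes bytes from the front of the buffer by bumping the shared `offset`,
// so the remaining chunk shrinks in place:
//
//     use mco_redis::bytes::{Buf, BytesVec};
//
//     let mut buf = BytesVec::copy_from_slice(&b"abc"[..]);
//     assert_eq!(buf.chunk(), b"abc");
//     buf.advance(2);
//     assert_eq!(buf.chunk(), b"c");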
2324
2325impl BufMut for BytesVec {
2326    #[inline]
2327    fn remaining_mut(&self) -> usize {
2328        self.capacity() - self.len()
2329    }
2330
2331    #[inline]
2332    unsafe fn advance_mut(&mut self, cnt: usize) {
2333        let new_len = self.len() + cnt;
2334
2335        // This call will panic if `cnt` is too big
2336        self.inner.set_len(new_len);
2337    }
2338
2339    #[inline]
2340    fn chunk_mut(&mut self) -> &mut UninitSlice {
2341        let len = self.len();
2342
2343        unsafe {
2344            // This will never panic as `len` can never become invalid
2345            let ptr = &mut self.inner.as_raw()[len..];
2346
2347            UninitSlice::from_raw_parts_mut(ptr.as_mut_ptr(), self.capacity() - len)
2348        }
2349    }
2350
2351    #[inline]
2352    fn put_slice(&mut self, src: &[u8]) {
2353        let len = src.len();
2354        self.reserve(len);
2355
2356        unsafe {
2357            ptr::copy_nonoverlapping(
2358                src.as_ptr(),
2359                self.chunk_mut().as_mut_ptr() as *mut u8,
2360                len,
2361            );
2362            self.advance_mut(len);
2363        }
2364    }
2365
2366    #[inline]
2367    fn put_u8(&mut self, n: u8) {
2368        self.reserve(1);
2369        self.inner.put_u8(n);
2370    }
2371
2372    #[inline]
2373    fn put_i8(&mut self, n: i8) {
2374        self.reserve(1);
2375        self.put_u8(n as u8);
2376    }
2377}
2378
2379impl AsRef<[u8]> for BytesVec {
2380    #[inline]
2381    fn as_ref(&self) -> &[u8] {
2382        self.inner.as_ref()
2383    }
2384}
2385
2386impl AsMut<[u8]> for BytesVec {
2387    #[inline]
2388    fn as_mut(&mut self) -> &mut [u8] {
2389        self.inner.as_mut()
2390    }
2391}
2392
2393impl Deref for BytesVec {
2394    type Target = [u8];
2395
2396    #[inline]
2397    fn deref(&self) -> &[u8] {
2398        self.as_ref()
2399    }
2400}
2401
2402impl DerefMut for BytesVec {
2403    #[inline]
2404    fn deref_mut(&mut self) -> &mut [u8] {
2405        self.inner.as_mut()
2406    }
2407}
2408
2409impl Eq for BytesVec {}
2410
2411impl PartialEq for BytesVec {
2412    #[inline]
2413    fn eq(&self, other: &BytesVec) -> bool {
2414        self.inner.as_ref() == other.inner.as_ref()
2415    }
2416}
2417
2418impl Default for BytesVec {
2419    #[inline]
2420    fn default() -> BytesVec {
2421        BytesVec::new()
2422    }
2423}
2424
2425impl Borrow<[u8]> for BytesVec {
2426    #[inline]
2427    fn borrow(&self) -> &[u8] {
2428        self.as_ref()
2429    }
2430}
2431
2432impl BorrowMut<[u8]> for BytesVec {
2433    #[inline]
2434    fn borrow_mut(&mut self) -> &mut [u8] {
2435        self.as_mut()
2436    }
2437}
2438
2439impl fmt::Debug for BytesVec {
2440    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2441        fmt::Debug::fmt(&debug::BsDebug(self.inner.as_ref()), fmt)
2442    }
2443}
2444
2445impl fmt::Write for BytesVec {
2446    #[inline]
2447    fn write_str(&mut self, s: &str) -> fmt::Result {
2448        if self.remaining_mut() >= s.len() {
2449            self.put_slice(s.as_bytes());
2450            Ok(())
2451        } else {
2452            Err(fmt::Error)
2453        }
2454    }
2455
2456    #[inline]
2457    fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
2458        fmt::write(self, args)
2459    }
2460}
2461
2462impl IntoIterator for BytesVec {
2463    type Item = u8;
2464    type IntoIter = IntoIter<BytesVec>;
2465
2466    fn into_iter(self) -> Self::IntoIter {
2467        IntoIter::new(self)
2468    }
2469}
2470
2471impl<'a> IntoIterator for &'a BytesVec {
2472    type Item = &'a u8;
2473    type IntoIter = std::slice::Iter<'a, u8>;
2474
2475    fn into_iter(self) -> Self::IntoIter {
2476        self.as_ref().iter()
2477    }
2478}
2479
2480impl FromIterator<u8> for BytesVec {
2481    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
2482        let iter = into_iter.into_iter();
2483        let (min, maybe_max) = iter.size_hint();
2484
2485        let mut out = BytesVec::with_capacity(maybe_max.unwrap_or(min));
2486        for i in iter {
2487            out.reserve(1);
2488            out.put_u8(i);
2489        }
2490
2491        out
2492    }
2493}
2494
2495impl<'a> FromIterator<&'a u8> for BytesVec {
2496    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
2497        into_iter.into_iter().copied().collect::<BytesVec>()
2498    }
2499}
2500
2501impl Extend<u8> for BytesVec {
2502    fn extend<T>(&mut self, iter: T)
2503    where
2504        T: IntoIterator<Item = u8>,
2505    {
2506        let iter = iter.into_iter();
2507
2508        let (lower, _) = iter.size_hint();
2509        self.reserve(lower);
2510
2511        for b in iter {
2512            self.put_u8(b);
2513        }
2514    }
2515}
2516
2517impl<'a> Extend<&'a u8> for BytesVec {
2518    fn extend<T>(&mut self, iter: T)
2519    where
2520        T: IntoIterator<Item = &'a u8>,
2521    {
2522        self.extend(iter.into_iter().copied())
2523    }
2524}
2525
2526struct InnerVec(NonNull<SharedVec>);
2527
2528impl InnerVec {
2529    #[inline]
2530    fn with_capacity(capacity: usize, pool: PoolRef) -> InnerVec {
2531        Self::from_slice(capacity, &[], pool)
2532    }
2533
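    // NOTE (illustrative, not from the original source): `from_slice` makes a
    // single allocation holding the `SharedVec` header followed by the data:
    //
    //     shared_ptr        shared_ptr.add(1) as *mut u8
    //     |                 |
    //     v                 v
    //     +-----------------+---------------------------+
    //     |    SharedVec    |        data (cap)         |
    //     +-----------------+---------------------------+
    //
    // The header's `offset` field records where the visible data starts.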
2534    #[inline]
2535    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> InnerVec {
2536        // the allocation must be aligned for `SharedVec` (the header), not `u8`
2537        let mut vec_cap = (cap / SHARED_VEC_SIZE) + 1;
2538        if cap % SHARED_VEC_SIZE != 0 {
2539            vec_cap += 1;
2540        }
2541        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
2542        unsafe {
2543            // Store data in vec
2544            let len = src.len() as u32;
2545            let cap = vec.capacity() * SHARED_VEC_SIZE;
2546            let shared_ptr = vec.as_mut_ptr();
2547            mem::forget(vec);
2548            pool.acquire(cap);
2549
2550            let ptr = shared_ptr.add(1) as *mut u8;
2551            if !src.is_empty() {
2552                ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
2553            }
2554            ptr::write(
2555                shared_ptr,
2556                SharedVec {
2557                    len,
2558                    cap,
2559                    pool,
2560                    ref_count: AtomicUsize::new(1),
2561                    offset: SHARED_VEC_SIZE as u32,
2562                },
2563            );
2564
2565            InnerVec(NonNull::new_unchecked(shared_ptr))
2566        }
2567    }
2568
2569    #[inline]
2570    fn move_to_pool(&mut self, pool: PoolRef) {
2571        unsafe {
2572            let inner = self.as_inner();
2573            if pool != inner.pool {
2574                pool.acquire(inner.cap);
2575                let pool = mem::replace(&mut inner.pool, pool);
2576                pool.release(inner.cap);
2577            }
2578        }
2579    }
2580
2581    /// Return a slice for the handle's view into the shared buffer
2582    #[inline]
2583    fn as_ref(&self) -> &[u8] {
2584        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
2585    }
2586
2587    /// Return a mutable slice for the handle's view into the shared buffer
2588    #[inline]
2589    fn as_mut(&mut self) -> &mut [u8] {
2590        unsafe { slice::from_raw_parts_mut(self.as_ptr(), self.len()) }
2591    }
2592
2593    /// Return a mutable slice for the handle's view into the shared buffer
2594    /// including potentially uninitialized bytes.
2595    #[inline]
2596    unsafe fn as_raw(&mut self) -> &mut [u8] {
2597        slice::from_raw_parts_mut(self.as_ptr(), self.capacity())
2598    }
2599
2600    /// Return a raw pointer to data
2601    #[inline]
2602    unsafe fn as_ptr(&self) -> *mut u8 {
2603        (self.0.as_ptr() as *mut u8).add((*self.0.as_ptr()).offset as usize)
2604    }
2605
2606    #[inline]
2607    unsafe fn as_inner(&mut self) -> &mut SharedVec {
2608        self.0.as_mut()
2609    }
2610
2611    /// Insert a byte into the next slot and advance the len by 1.
2612    #[inline]
2613    fn put_u8(&mut self, n: u8) {
2614        unsafe {
2615            let inner = self.as_inner();
2616            let len = inner.len as usize;
2617            assert!(len < (inner.cap - inner.offset as usize));
2618            inner.len += 1;
2619            *self.as_ptr().add(len) = n;
2620        }
2621    }
2622
2623    #[inline]
2624    fn len(&self) -> usize {
2625        unsafe { (*self.0.as_ptr()).len as usize }
2626    }
2627
2628    /// Set the length of the handle's view into the underlying slice.
2629    #[inline]
2630    unsafe fn set_len(&mut self, len: usize) {
2631        let inner = self.as_inner();
2632        assert!(len <= (inner.cap - inner.offset as usize) && len < u32::MAX as usize);
2633        inner.len = len as u32;
2634    }
2635
2636    #[inline]
2637    fn capacity(&self) -> usize {
2638        unsafe { (*self.0.as_ptr()).cap - (*self.0.as_ptr()).offset as usize }
2639    }
2640
2641    fn into_inner(mut self) -> Inner {
2642        unsafe {
2643            let ptr = self.as_ptr();
2644
2645            if self.len() <= INLINE_CAP {
2646                Inner::from_ptr_inline(ptr, self.len())
2647            } else {
2648                let inner = self.as_inner();
2649
2650                let inner = Inner {
2651                    ptr,
2652                    len: inner.len as usize,
2653                    cap: inner.cap - inner.offset as usize,
2654                    arc: NonNull::new_unchecked(
2655                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2656                    ),
2657                };
2658                mem::forget(self);
2659                inner
2660            }
2661        }
2662    }
2663
2664    fn with_bytes_mut<F, R>(&mut self, f: F) -> R
2665    where
2666        F: FnOnce(&mut BytesMut) -> R,
2667    {
2668        unsafe {
2669            // create Inner for BytesMut
2670            let ptr = self.as_ptr();
2671            let inner = self.as_inner();
2672            let inner = Inner {
2673                ptr,
2674                len: inner.len as usize,
2675                cap: inner.cap - inner.offset as usize,
2676                arc: NonNull::new_unchecked(
2677                    (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2678                ),
2679            };
2680
2681            // run function
2682            let mut buf = BytesMut { inner };
2683            let result = f(&mut buf);
2684
2685            // convert BytesMut back to InnerVec
2686            let kind = buf.inner.kind();
2687            let new_inner =
2688                // only KIND_VEC can be converted back into `InnerVec`; otherwise the data has to be copied
2689                if kind == KIND_INLINE || kind == KIND_STATIC || kind == KIND_ARC {
2690                    InnerVec::from_slice(
2691                        buf.inner.capacity(),
2692                        buf.inner.as_ref(),
2693                        buf.inner.pool(),
2694                    )
2695                } else if kind == KIND_VEC {
2696                    let ptr = buf.inner.shared_vec();
2697                    let offset = buf.inner.ptr as usize - ptr as usize;
2698
2699                    // the shared vec cannot be reused if the BytesMut points into the middle of it
2700                    if buf.inner.cap < (*ptr).cap - offset {
2701                        InnerVec::from_slice(
2702                            buf.inner.capacity(),
2703                            buf.inner.as_ref(),
2704                            buf.inner.pool(),
2705                        )
2706                    } else {
2707                        // the BytesMut owns the rest of the vec, so reuse it
2708                        (*ptr).len = buf.len() as u32;
2709                        (*ptr).offset = offset as u32;
2710                        let inner = InnerVec(NonNull::new_unchecked(ptr));
2711                        mem::forget(buf); // reuse bytes
2712                        inner
2713                    }
2714                } else {
2715                    panic!()
2716                };
2717
2718            // forget the old inner; it must not be dropped because the BytesMut consumed it
2719            let old = mem::replace(self, new_inner);
2720            mem::forget(old);
2721
2722            result
2723        }
2724    }
2725
2726    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
2727        unsafe {
2728            let ptr = self.as_ptr();
2729
2730            let other = if create_inline && at <= INLINE_CAP {
2731                Inner::from_ptr_inline(ptr, at)
2732            } else {
2733                let inner = self.as_inner();
2734                let old_size = inner.ref_count.fetch_add(1, Relaxed);
2735                if old_size == usize::MAX {
2736                    abort();
2737                }
2738
2739                Inner {
2740                    ptr,
2741                    len: at,
2742                    cap: at,
2743                    arc: NonNull::new_unchecked(
2744                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
2745                    ),
2746                }
2747            };
2748            self.set_start(at as u32);
2749
2750            other
2751        }
2752    }
2753
2754    fn truncate(&mut self, len: usize) {
2755        unsafe {
2756            if len <= self.len() {
2757                self.set_len(len);
2758            }
2759        }
2760    }
2761
2762    fn resize(&mut self, new_len: usize, value: u8) {
2763        let len = self.len();
2764        if new_len > len {
2765            let additional = new_len - len;
2766            self.reserve(additional);
2767            unsafe {
2768                let dst = self.as_raw()[len..].as_mut_ptr();
2769                ptr::write_bytes(dst, value, additional);
2770                self.set_len(new_len);
2771            }
2772        } else {
2773            self.truncate(new_len);
2774        }
2775    }
2776
2777    #[inline]
2778    fn reserve(&mut self, additional: usize) {
2779        let len = self.len();
2780        let rem = self.capacity() - len;
2781
2782        if additional <= rem {
2783            // The handle can already store at least `additional` more bytes, so
2784            // there is no further work needed to be done.
2785            return;
2786        }
2787
2788        self.reserve_inner(additional)
2789    }
2790
2791    #[inline]
2792    // In a separate function to allow the short-circuits in `reserve` to
2793    // be inline-able. This significantly helps performance.
2794    fn reserve_inner(&mut self, additional: usize) {
2795        let len = self.len();
2796
2797        // Reserving involves abandoning the currently shared buffer and
2798        // allocating a new vector with the requested capacity.
2799        let new_cap = len + additional;
2800
2801        unsafe {
2802            let inner = self.as_inner();
2803            let vec_cap = inner.cap - SHARED_VEC_SIZE;
2804
2805            // try to reclaim the buffer. This is possible if the current
2806            // handle is the only outstanding handle pointing to the buffer.
2807            if inner.is_unique() && vec_cap >= new_cap {
2808                let offset = inner.offset;
2809                inner.offset = SHARED_VEC_SIZE as u32;
2810
2811                // The capacity is sufficient, reclaim the buffer
2812                let src = (self.0.as_ptr() as *mut u8).add(offset as usize);
2813                let dst = (self.0.as_ptr() as *mut u8).add(SHARED_VEC_SIZE);
2814                ptr::copy(src, dst, len);
2815            } else {
2816                // Create a new vector storage
2817                let pool = inner.pool;
2818                *self = InnerVec::from_slice(new_cap, self.as_ref(), pool);
2819            }
2820        }
2821    }
2822
2823    unsafe fn set_start(&mut self, start: u32) {
2824        // Setting the start to 0 is a no-op, so return early if this is the
2825        // case.
2826        if start == 0 {
2827            return;
2828        }
2829
2830        let inner = self.as_inner();
2831        assert!(start <= inner.cap as u32);
2832
2833        // Updating the start of the view is setting `offset` to point to the
2834        // new start and updating the `len` field to reflect the new length
2835        // of the view.
2836        inner.offset += start;
2837
2838        if inner.len >= start {
2839            inner.len -= start;
2840        } else {
2841            inner.len = 0;
2842        }
2843    }
2844}
2845
2846impl Drop for InnerVec {
2847    fn drop(&mut self) {
2848        release_shared_vec(self.0.as_ptr());
2849    }
2850}
2851
2852/*
2853 *
2854 * ===== Inner =====
2855 *
2856 */
2857
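// NOTE: an illustrative sketch, not part of the original source. `Inner`
// encodes the buffer kind in the low bits of its `arc` pointer instead of
// keeping a separate discriminant field, so classifying a handle is a
// single mask:
//
//     let kind = inner.arc.as_ptr() as usize & KIND_MASK;
//     // `kind` is one of KIND_INLINE / KIND_STATIC / KIND_VEC / KIND_ARC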
2858impl Inner {
2859    #[inline]
2860    const fn from_static(bytes: &'static [u8]) -> Inner {
2861        let ptr = bytes.as_ptr() as *mut u8;
2862
2863        Inner {
2864            // `arc` won't ever store a pointer. Instead, use it to
2865            // track the fact that the `Bytes` handle is backed by a
2866            // static buffer.
2867            arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
2868            ptr,
2869            len: bytes.len(),
2870            cap: bytes.len(),
2871        }
2872    }
2873
2874    #[inline]
2875    const fn empty_inline() -> Inner {
2876        Inner {
2877            arc: unsafe { NonNull::new_unchecked(KIND_INLINE as *mut Shared) },
2878            ptr: 0 as *mut u8,
2879            len: 0,
2880            cap: 0,
2881        }
2882    }
2883
2884    #[inline]
2885    fn from_vec(mut vec: Vec<u8>, pool: PoolRef) -> Inner {
2886        let len = vec.len();
2887        let cap = vec.capacity();
2888        let ptr = vec.as_mut_ptr();
2889        pool.acquire(cap);
2890
2891        // Store data in arc
2892        let shared = Box::into_raw(Box::new(Shared {
2893            vec,
2894            pool,
2895            ref_count: AtomicUsize::new(1),
2896        }));
2897
2898        // The pointer should be aligned, so this assert should always succeed.
2899        debug_assert!(0 == (shared as usize & KIND_MASK));
2900
2901        // Create a new arc, so atomic operations can be avoided.
2902        Inner {
2903            ptr,
2904            len,
2905            cap,
2906            arc: unsafe { NonNull::new_unchecked(shared) },
2907        }
2908    }
2909
2910    #[inline]
2911    fn with_capacity(capacity: usize, pool: PoolRef) -> Inner {
2912        Inner::from_slice(capacity, &[], pool)
2913    }
2914
2915    #[inline]
2916    fn from_slice(cap: usize, src: &[u8], pool: PoolRef) -> Inner {
2917        // the allocation must be aligned for `SharedVec` (the header), not `u8`
2918        let mut vec_cap = (cap / SHARED_VEC_SIZE) + 1;
2919        if cap % SHARED_VEC_SIZE != 0 {
2920            vec_cap += 1;
2921        }
2922        let mut vec = Vec::<SharedVec>::with_capacity(vec_cap);
2923        unsafe {
2924            // Store data in vec
2925            let len = src.len();
2926            let full_cap = vec.capacity() * SHARED_VEC_SIZE;
2927            let cap = full_cap - SHARED_VEC_SIZE;
2928            let shared_ptr = vec.as_mut_ptr();
2929            mem::forget(vec);
2930            pool.acquire(full_cap);
2931
2932            let ptr = shared_ptr.add(1) as *mut u8;
2933            ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
2934            ptr::write(
2935                shared_ptr,
2936                SharedVec {
2937                    pool,
2938                    cap: full_cap,
2939                    ref_count: AtomicUsize::new(1),
2940                    len: 0,
2941                    offset: 0,
2942                },
2943            );
2944
2945            // Create a new arc, so atomic operations can be avoided.
2946            Inner {
2947                len,
2948                cap,
2949                ptr,
2950                arc: NonNull::new_unchecked(
2951                    (shared_ptr as usize ^ KIND_VEC) as *mut Shared,
2952                ),
2953            }
2954        }
2955    }
2956
2957    #[inline]
2958    fn from_slice_inline(src: &[u8]) -> Inner {
2959        unsafe { Inner::from_ptr_inline(src.as_ptr(), src.len()) }
2960    }
2961
2962    #[inline]
2963    unsafe fn from_ptr_inline(src: *const u8, len: usize) -> Inner {
2964        // Using uninitialized memory is ~30% faster
2965        #[allow(invalid_value, clippy::uninit_assumed_init)]
2966        let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();
2967        inner.arc = NonNull::new_unchecked(KIND_INLINE as *mut Shared);
2968
2969        let dst = inner.inline_ptr();
2970        ptr::copy(src, dst, len);
2971        inner.set_inline_len(len);
2972        inner
2973    }
2974
2975    #[inline]
2976    fn pool(&self) -> PoolRef {
2977        let kind = self.kind();
2978
2979        if kind == KIND_VEC {
2980            unsafe { (*self.shared_vec()).pool }
2981        } else if kind == KIND_ARC {
2982            unsafe { (*self.arc.as_ptr()).pool }
2983        } else {
2984            PoolId::DEFAULT.pool_ref()
2985        }
2986    }
2987
2988    #[inline]
2989    fn move_to_pool(&mut self, pool: PoolRef) {
2990        let kind = self.kind();
2991
2992        if kind == KIND_VEC {
2993            let vec = self.shared_vec();
2994            unsafe {
2995                let cap = (*vec).cap;
2996                pool.acquire(cap);
2997                let pool = mem::replace(&mut (*vec).pool, pool);
2998                pool.release(cap);
2999            }
3000        } else if kind == KIND_ARC {
3001            let arc = self.arc.as_ptr();
3002            unsafe {
3003                let cap = (*arc).vec.capacity();
3004                pool.acquire(cap);
3005                let pool = mem::replace(&mut (*arc).pool, pool);
3006                pool.release(cap);
3007            }
3008        }
3009    }
3010
3011    /// Return a slice for the handle's view into the shared buffer
3012    #[inline]
3013    fn as_ref(&self) -> &[u8] {
3014        unsafe {
3015            if self.is_inline() {
3016                slice::from_raw_parts(self.inline_ptr(), self.inline_len())
3017            } else {
3018                slice::from_raw_parts(self.ptr, self.len)
3019            }
3020        }
3021    }
3022
3023    /// Return a mutable slice for the handle's view into the shared buffer
3024    #[inline]
3025    fn as_mut(&mut self) -> &mut [u8] {
3026        debug_assert!(!self.is_static());
3027
3028        unsafe {
3029            if self.is_inline() {
3030                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
3031            } else {
3032                slice::from_raw_parts_mut(self.ptr, self.len)
3033            }
3034        }
3035    }
3036
3037    /// Return a mutable slice for the handle's view into the shared buffer
3038    /// including potentially uninitialized bytes.
3039    #[inline]
3040    unsafe fn as_raw(&mut self) -> &mut [u8] {
3041        debug_assert!(!self.is_static());
3042
3043        if self.is_inline() {
3044            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
3045        } else {
3046            slice::from_raw_parts_mut(self.ptr, self.cap)
3047        }
3048    }
3049
3050    /// Return a raw pointer to data
3051    #[inline]
3052    unsafe fn as_ptr(&mut self) -> *mut u8 {
3053        if self.is_inline() {
3054            self.inline_ptr()
3055        } else {
3056            self.ptr
3057        }
3058    }
3059
3060    /// Insert a byte into the next slot and advance the len by 1.
3061    #[inline]
3062    fn put_u8(&mut self, n: u8) {
3063        if self.is_inline() {
3064            let len = self.inline_len();
3065            assert!(len < INLINE_CAP);
3066            unsafe {
3067                *self.inline_ptr().add(len) = n;
3068            }
3069            self.set_inline_len(len + 1);
3070        } else {
3071            assert!(self.len < self.cap);
3072            unsafe {
3073                *self.ptr.add(self.len) = n;
3074            }
3075            self.len += 1;
3076        }
3077    }
3078
3079    #[inline]
3080    fn len(&self) -> usize {
3081        if self.is_inline() {
3082            self.inline_len()
3083        } else {
3084            self.len
3085        }
3086    }
3087
3088    /// Pointer to the start of the inline buffer
3089    #[inline]
3090    unsafe fn inline_ptr(&self) -> *mut u8 {
3091        (self as *const Inner as *mut Inner as *mut u8).offset(INLINE_DATA_OFFSET)
3092    }
3093
3094    #[inline]
3095    fn to_inline(&self) -> Inner {
3096        unsafe {
3097            // Using uninitialized memory is ~30% faster
3098            #[allow(invalid_value, clippy::uninit_assumed_init)]
3099            let mut inner: Inner = mem::MaybeUninit::uninit().assume_init();
3100            inner.arc = NonNull::new_unchecked(KIND_INLINE as *mut Shared);
3101            let len = self.len();
3102            inner.as_raw()[..len].copy_from_slice(self.as_ref());
3103            inner.set_inline_len(len);
3104            inner
3105        }
3106    }
3107
3108    #[inline]
3109    fn inline_len(&self) -> usize {
3110        // This is undefined behavior due to a data race, but experimental
3111        // evidence shows that it works in practice (discussion:
3112        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3113        (self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
3114    }
3115
3116    /// Set the length of the inline buffer. This is done by writing to the
3117    /// least significant byte of the `arc` field.
3118    #[inline]
3119    fn set_inline_len(&mut self, len: usize) {
3120        debug_assert!(len <= INLINE_CAP);
3121        self.arc = unsafe {
3122            NonNull::new_unchecked(
3123                ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
3124                    | (len << INLINE_LEN_OFFSET)) as _,
3125            )
3126        };
3127    }
3128
3129    /// Set the length of the handle's view into the underlying slice.
3130    #[inline]
3131    unsafe fn set_len(&mut self, len: usize) {
3132        if self.is_inline() {
3133            assert!(len <= INLINE_CAP);
3134            self.set_inline_len(len);
3135        } else {
3136            assert!(len <= self.cap);
3137            self.len = len;
3138        }
3139    }
3140
3141    #[inline]
3142    fn is_empty(&self) -> bool {
3143        self.len() == 0
3144    }
3145
3146    #[inline]
3147    fn capacity(&self) -> usize {
3148        if self.is_inline() {
3149            INLINE_CAP
3150        } else {
3151            self.cap
3152        }
3153    }
3154
3155    fn split_off(&mut self, at: usize, create_inline: bool) -> Inner {
3156        let other = unsafe {
3157            if create_inline && self.len() - at <= INLINE_CAP {
3158                Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at)
3159            } else {
3160                let mut other = self.shallow_clone();
3161                other.set_start(at);
3162                other
3163            }
3164        };
3165        unsafe {
3166            if create_inline && at <= INLINE_CAP {
3167                *self = Inner::from_ptr_inline(self.as_ptr(), at);
3168            } else {
3169                self.set_end(at);
3170            }
3171        }
3172
3173        other
3174    }
3175
3176    fn split_to(&mut self, at: usize, create_inline: bool) -> Inner {
3177        let other = unsafe {
3178            if create_inline && at <= INLINE_CAP {
3179                Inner::from_ptr_inline(self.as_ptr(), at)
3180            } else {
3181                let mut other = self.shallow_clone();
3182                other.set_end(at);
3183                other
3184            }
3185        };
3186        unsafe {
3187            if create_inline && self.len() - at <= INLINE_CAP {
3188                *self = Inner::from_ptr_inline(self.as_ptr().add(at), self.len() - at);
3189            } else {
3190                self.set_start(at);
3191            }
3192        }
3193
3194        other
3195    }
3196
3197    fn truncate(&mut self, len: usize, create_inline: bool) {
3198        unsafe {
3199            if len <= self.len() {
3200                if create_inline && len < INLINE_CAP {
3201                    *self = Inner::from_ptr_inline(self.as_ptr(), len);
3202                } else {
3203                    self.set_len(len);
3204                }
3205            }
3206        }
3207    }
3208
3209    fn resize(&mut self, new_len: usize, value: u8) {
3210        let len = self.len();
3211        if new_len > len {
3212            let additional = new_len - len;
3213            self.reserve(additional);
3214            unsafe {
3215                let dst = self.as_raw()[len..].as_mut_ptr();
3216                ptr::write_bytes(dst, value, additional);
3217                self.set_len(new_len);
3218            }
3219        } else {
3220            self.truncate(new_len, false);
3221        }
3222    }
3223
3224    unsafe fn set_start(&mut self, start: usize) {
3225        // Setting the start to 0 is a no-op, so return early if this is the
3226        // case.
3227        if start == 0 {
3228            return;
3229        }
3230
3231        let kind = self.kind();
3232
3233        // Always check `inline` first, because if the handle is using inline
3234        // data storage, all of the `Inner` struct fields will be gibberish.
3235        if kind == KIND_INLINE {
3236            assert!(start <= INLINE_CAP);
3237
3238            let len = self.inline_len();
3239            if len <= start {
3240                self.set_inline_len(0);
3241            } else {
3242                // `set_start` is essentially shifting data off the front of the
3243                // view. Inlined buffers only track the length of the slice.
3244                // So, to update the start, the data at the new starting point
3245                // is copied to the beginning of the buffer.
3246                let new_len = len - start;
3247
3248                let dst = self.inline_ptr();
3249                let src = (dst as *const u8).add(start);
3250
3251                ptr::copy(src, dst, new_len);
3252
3253                self.set_inline_len(new_len);
3254            }
3255        } else {
3256            assert!(start <= self.cap);
3257
3258            // Updating the start of the view is setting `ptr` to point to the
3259            // new start and updating the `len` field to reflect the new length
3260            // of the view.
3261            self.ptr = self.ptr.add(start);
3262
3263            if self.len >= start {
3264                self.len -= start;
3265            } else {
3266                self.len = 0;
3267            }
3268
3269            self.cap -= start;
3270        }
3271    }
3272
3273    unsafe fn set_end(&mut self, end: usize) {
3274        // Always check `inline` first, because if the handle is using inline
3275        // data storage, all of the `Inner` struct fields will be gibberish.
3276        if self.is_inline() {
3277            assert!(end <= INLINE_CAP);
3278            let new_len = cmp::min(self.inline_len(), end);
3279            self.set_inline_len(new_len);
3280        } else {
3281            assert!(end <= self.cap);
3282
3283            self.cap = end;
3284            self.len = cmp::min(self.len, end);
3285        }
3286    }
3287
3288    /// Checks if it is safe to mutate the memory
3289    fn is_mut_safe(&self) -> bool {
3290        let kind = self.kind();
3291
3292        // Always check `inline` first, because if the handle is using inline
3293        // data storage, all of the `Inner` struct fields will be gibberish.
3294        if kind == KIND_INLINE {
3295            // Inlined buffers can always be mutated as the data is never shared
3296            // across handles.
3297            true
3298        } else if kind == KIND_STATIC {
3299            false
3300        } else if kind == KIND_VEC {
3301            // Otherwise, the underlying buffer is potentially shared with other
3302            // handles, so the ref_count needs to be checked.
3303            unsafe { (*self.shared_vec()).is_unique() }
3304        } else {
3305            // Otherwise, the underlying buffer is potentially shared with other
3306            // handles, so the ref_count needs to be checked.
3307            unsafe { (*self.arc.as_ptr()).is_unique() }
3308        }
3309    }
3310
3311    /// Increments the ref count. This should only be done if it is known that
3312    /// it can be done safely. As such, this fn is not public, instead other
3313    /// fns will use this one while maintaining the guarantees.
3316    ///
3317    /// "Safely" is defined as not exposing two `BytesMut` values that point to
3318    /// the same byte window.
3319    ///
3320    /// This function is thread safe.
3321    unsafe fn shallow_clone(&self) -> Inner {
3322        // Always check `inline` first, because if the handle is using inline
3323        // data storage, all of the `Inner` struct fields will be gibberish.
3324        //
3325        // Additionally, if kind is STATIC, then Arc is *never* changed, making
3326        // it safe and faster to check for it now before an atomic acquire.
3327
3328        if self.is_inline_or_static() {
3329            // In this case, a shallow_clone still involves copying the data.
3330            let mut inner: mem::MaybeUninit<Inner> = mem::MaybeUninit::uninit();
3331            ptr::copy_nonoverlapping(self, inner.as_mut_ptr(), 1);
3332            inner.assume_init()
3333        } else {
3334            self.shallow_clone_sync()
3335        }
3336    }
3337
3338    #[cold]
3339    unsafe fn shallow_clone_sync(&self) -> Inner {
3340        // The function takes `&self`, which means that `shallow_clone`
3341        // could be called concurrently.
3342        //
3343        // The first step is to read the value of `arc`; its tag bits determine
3344        // how to proceed. The kind encoded in `arc` never changes once set, so
3345        // a plain read is sufficient here; the reference count itself is then
3346        // incremented with a `Relaxed` `fetch_add`, relying on the
3347        // `Release`/`Acquire` pair in `release_shared` for synchronization.
3348        let arc: *mut Shared = self.arc.as_ptr();
3349        let kind = arc as usize & KIND_MASK;
3350
3351        if kind == KIND_ARC {
3352            let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
3353            if old_size == usize::MAX {
3354                abort();
3355            }
3356
3357            Inner {
3358                arc: NonNull::new_unchecked(arc),
3359                ..*self
3360            }
3361        } else {
3362            assert!(kind == KIND_VEC);
3363
3364            let vec_arc = (arc as usize & KIND_UNMASK) as *mut SharedVec;
3365            let old_size = (*vec_arc).ref_count.fetch_add(1, Relaxed);
3366            if old_size == usize::MAX {
3367                abort();
3368            }
3369
3370            Inner {
3371                arc: NonNull::new_unchecked(arc),
3372                ..*self
3373            }
3374        }
3375    }
3376
3377    #[inline]
3378    fn reserve(&mut self, additional: usize) {
3379        let len = self.len();
3380        let rem = self.capacity() - len;
3381
3382        if additional <= rem {
3383            // The handle can already store at least `additional` more bytes, so
3384            // there is no further work needed to be done.
3385            return;
3386        }
3387
3388        self.reserve_inner(additional)
3389    }
3390
3391    #[inline]
3392    // In a separate function to allow the short-circuits in `reserve` to
3393    // be inline-able. This significantly helps performance.
3394    fn reserve_inner(&mut self, additional: usize) {
3395        let len = self.len();
3396        let kind = self.kind();
3397
3398        // Always check `inline` first, because if the handle is using inline
3399        // data storage, all of the `Inner` struct fields will be gibberish.
3400        if kind == KIND_INLINE {
3401            let new_cap = len + additional;
3402
3403            // Promote to a vector
3404            *self = Inner::from_slice(new_cap, self.as_ref(), PoolId::DEFAULT.pool_ref());
3405            return;
3406        }
3407
3408        // Reserving involves abandoning the currently shared buffer and
3409        // allocating a new vector with the requested capacity.
3410        let new_cap = len + additional;
3411
3412        if kind == KIND_VEC {
3413            let vec = self.shared_vec();
3414
3415            unsafe {
3416                let vec_cap = (*vec).cap - SHARED_VEC_SIZE;
3417
3418                // First, try to reclaim the buffer. This is possible if the current
3419                // handle is the only outstanding handle pointing to the buffer.
3420                if (*vec).is_unique() && vec_cap >= new_cap {
3421                    // The capacity is sufficient, reclaim the buffer
3422                    let ptr = (vec as *mut u8).add(SHARED_VEC_SIZE);
3423                    ptr::copy(self.ptr, ptr, len);
3424
3425                    self.ptr = ptr;
3426                    self.cap = vec_cap;
3427                } else {
3428                    // Create a new vector storage
3429                    *self = Inner::from_slice(new_cap, self.as_ref(), (*vec).pool);
3430                }
3431            }
3432        } else {
3433            debug_assert!(kind == KIND_ARC);
3434
3435            let arc = self.arc.as_ptr();
3436            unsafe {
3437                // First, try to reclaim the buffer. This is possible if the current
3438                // handle is the only outstanding handle pointing to the buffer.
3439                if (*arc).is_unique() {
3440                    // This is the only handle to the buffer. It can be reclaimed.
3441                    // However, before doing the work of copying data, check to make
3442                    // sure that the vector has enough capacity.
3443                    let v = &mut (*arc).vec;
3444
3445                    if v.capacity() >= new_cap {
3446                        // The capacity is sufficient, reclaim the buffer
3447                        let ptr = v.as_mut_ptr();
3448
3449                        ptr::copy(self.ptr, ptr, len);
3450
3451                        self.ptr = ptr;
3452                        self.cap = v.capacity();
3453                        return;
3454                    }
3455                }
3456
3457                // Create a new vector storage
3458                *self = Inner::from_slice(new_cap, self.as_ref(), (*arc).pool);
3459            }
3460        }
3461    }
3462
3463    /// Returns true if the buffer is stored inline
3464    #[inline]
3465    fn is_inline(&self) -> bool {
3466        self.kind() == KIND_INLINE
3467    }
3468
3469    #[inline]
3470    fn is_inline_or_static(&self) -> bool {
3471        // The value returned by `kind` isn't itself unsafe to read, but it can
3472        // inform which operations to take, which may then do something unsafe
3473        // without synchronization.
3474        //
3475        // KIND_INLINE and KIND_STATIC will *never* change, so branching on that
3476        // information is safe.
3477        let kind = self.kind();
3478        kind == KIND_INLINE || kind == KIND_STATIC
3479    }
3480
3481    /// Used for `debug_assert` statements
3482    #[inline]
3483    fn is_static(&self) -> bool {
3484        matches!(self.kind(), KIND_STATIC)
3485    }
3486
3487    #[inline]
3488    fn shared_vec(&self) -> *mut SharedVec {
3489        ((self.arc.as_ptr() as usize) & KIND_UNMASK) as *mut SharedVec
3490    }
3491
3492    #[inline]
3493    fn kind(&self) -> usize {
3494        // This function will probably raise some eyebrows. It returns the kind
3495        // of the buffer, which is encoded in the least significant bits of the
3496        // `arc` field.
3497        //
3498        // `arc` may be shared between handles on different threads, yet it is
3499        // read here as a normal field without performing an atomic load...
3500        //
3501        // However, the function only cares about the least significant bits, and
3502        // those bits are set when `Inner` is created and never changed after that.
3503        // All platforms have atomic "word" operations and won't randomly flip
3504        // bits, so even without any explicit atomic operations, reading the
3505        // kind will be correct.
3506        //
3507        // This is undefined behavior due to a data race, but experimental
3508        // evidence shows that it works in practice (discussion:
3509        // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
3510        //
3511        // This function is very critical performance wise as it is called for
3512        // every operation. Performing an atomic load would mess with the
3513        // compiler's ability to optimize. Simple benchmarks show up to a 10%
3514        // slowdown using a `Relaxed` atomic load on x86.
3515
3516        #[cfg(target_endian = "little")]
3517        #[inline]
3518        fn imp(arc: *mut Shared) -> usize {
3519            (arc as usize) & KIND_MASK
3520        }
3521
3522        #[cfg(target_endian = "big")]
3523        #[inline]
3524        fn imp(arc: *mut Shared) -> usize {
3525            // The tag lives in the pointer *value*, so mask the value
3526            // directly. Dereferencing `arc` here would be unsound: for
3527            // KIND_INLINE the field does not hold a valid address.
3528            (arc as usize) & KIND_MASK
3529        }
3530
3531        imp(self.arc.as_ptr())
3532    }
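    // Editor's note: an illustrative sketch of the pointer-tagging scheme the
    // comment above describes (the concrete pointer value is made up; only the
    // masking pattern matters). Heap allocations are word-aligned, so a valid
    // pointer's low bits are zero and can carry the kind tag for free:
    //
    //     let ptr: usize = 0x7f12_3450;         // word-aligned allocation
    //     debug_assert_eq!(ptr & KIND_MASK, 0); // low bits start out clear
    //     let tagged = ptr | KIND_VEC;          // stash the kind in the low bits
    //     assert_eq!(tagged & KIND_MASK, KIND_VEC);
    //     assert_eq!(tagged & KIND_UNMASK, ptr); // recover the original pointer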
3533}
3534
3535impl Drop for Inner {
3536    fn drop(&mut self) {
3537        let kind = self.kind();
3538
3539        if kind == KIND_VEC {
3540            release_shared_vec(self.shared_vec());
3541        } else if kind == KIND_ARC {
3542            release_shared(self.arc.as_ptr());
3543        }
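        // Editor's note: KIND_INLINE and KIND_STATIC carry no heap
        // allocation, so there is nothing to release for them here.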
3544    }
3545}
3546
3547fn release_shared(ptr: *mut Shared) {
3548    // `Shared` storage... follow the drop steps from Arc.
3549    unsafe {
3550        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3551            return;
3552        }
3553
3554        // This fence is needed to prevent reordering of use of the data and
3555        // deletion of the data.  Because it is marked `Release`, the decreasing
3556        // of the reference count synchronizes with this `Acquire` fence. This
3557        // means that use of the data happens before decreasing the reference
3558        // count, which happens before this fence, which happens before the
3559        // deletion of the data.
3560        //
3561        // As explained in the [Boost documentation][1],
3562        //
3563        // > It is important to enforce any possible access to the object in one
3564        // > thread (through an existing reference) to *happen before* deleting
3565        // > the object in a different thread. This is achieved by a "release"
3566        // > operation after dropping a reference (any access to the object
3567        // > through this reference must obviously happened before), and an
3568        // > "acquire" operation before deleting the object.
3569        //
3570        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3571        atomic::fence(Acquire);
3572
3573        // Drop the data
3574        let arc = Box::from_raw(ptr);
3575        arc.pool.release(arc.vec.capacity());
3576    }
3577}
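// Editor's note: the same Arc-style release protocol in isolation, as a
// minimal sketch (this helper is hypothetical, not part of the crate):
//
//     fn release(ref_count: &AtomicUsize) -> bool {
//         if ref_count.fetch_sub(1, Release) != 1 {
//             return false; // other handles still reference the buffer
//         }
//         atomic::fence(Acquire); // order all prior uses before the free
//         true // sole owner: the caller may now free the storage
//     }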
3578
3579fn release_shared_vec(ptr: *mut SharedVec) {
3580    // `SharedVec` storage... follow the drop steps from Arc.
3581    unsafe {
3582        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
3583            return;
3584        }
3585
3586        // This fence is needed to prevent reordering of use of the data and
3587        // deletion of the data.  Because it is marked `Release`, the decreasing
3588        // of the reference count synchronizes with this `Acquire` fence. This
3589        // means that use of the data happens before decreasing the reference
3590        // count, which happens before this fence, which happens before the
3591        // deletion of the data.
3592        //
3593        // As explained in the [Boost documentation][1],
3594        //
3595        // > It is important to enforce any possible access to the object in one
3596        // > thread (through an existing reference) to *happen before* deleting
3597        // > the object in a different thread. This is achieved by a "release"
3598        // > operation after dropping a reference (any access to the object
3599        // > through this reference must obviously happened before), and an
3600        // > "acquire" operation before deleting the object.
3601        //
3602        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
3603        atomic::fence(Acquire);
3604
3605        // Drop the data
3606        let cap = (*ptr).cap;
3607        (*ptr).pool.release(cap);
3608        ptr::drop_in_place(ptr);
3609        Vec::<u8>::from_raw_parts(ptr as *mut u8, 0, cap); // rebuilt Vec frees the block when dropped
3610    }
3611}
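// Editor's note: for KIND_VEC the `SharedVec` header and the byte storage
// share a single allocation, which is why freeing it means rebuilding the
// original `Vec` over the whole block. A simplified sketch of the pattern:
//
//     let mut v: Vec<u8> = Vec::with_capacity(cap); // one block: header + data
//     let ptr = v.as_mut_ptr();
//     mem::forget(v); // ownership now lives behind the raw pointer
//     // ... later ...
//     drop(unsafe { Vec::<u8>::from_raw_parts(ptr, 0, cap) }); // len 0: frees, no element drops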
3612
3613impl Shared {
3614    fn is_unique(&self) -> bool {
3615        // The goal is to check if the current handle is the only handle
3616        // that currently has access to the buffer. This is done by
3617        // checking if the `ref_count` is currently 1.
3618        //
3619        // The `Acquire` ordering synchronizes with the `Release` as
3620        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
3621        // operation guarantees that any mutations done in other threads
3622        // are ordered before the `ref_count` is decremented. As such,
3623        // this `Acquire` will guarantee that those mutations are
3624        // visible to the current thread.
3625        self.ref_count.load(Acquire) == 1
3626    }
3627}
3628
3629impl SharedVec {
3630    fn is_unique(&self) -> bool {
3631        // Same as `Shared::is_unique()`, but for KIND_VEC storage.
3632        self.ref_count.load(Acquire) == 1
3633    }
3634}
3635
3636unsafe impl Send for Inner {}
3637unsafe impl Sync for Inner {}
3638
3639/*
3640 *
3641 * ===== PartialEq / PartialOrd =====
3642 *
3643 */
3644
3645impl PartialEq<[u8]> for BytesMut {
3646    fn eq(&self, other: &[u8]) -> bool {
3647        &**self == other
3648    }
3649}
3650
3651impl PartialEq<BytesMut> for [u8] {
3652    fn eq(&self, other: &BytesMut) -> bool {
3653        *other == *self
3654    }
3655}
3656
3657impl PartialEq<str> for BytesMut {
3658    fn eq(&self, other: &str) -> bool {
3659        &**self == other.as_bytes()
3660    }
3661}
3662
3663impl PartialEq<BytesMut> for str {
3664    fn eq(&self, other: &BytesMut) -> bool {
3665        *other == *self
3666    }
3667}
3668
3669impl PartialEq<Vec<u8>> for BytesMut {
3670    fn eq(&self, other: &Vec<u8>) -> bool {
3671        *self == other[..]
3672    }
3673}
3674
3675impl PartialEq<BytesMut> for Vec<u8> {
3676    fn eq(&self, other: &BytesMut) -> bool {
3677        *other == *self
3678    }
3679}
3680
3681impl PartialEq<String> for BytesMut {
3682    fn eq(&self, other: &String) -> bool {
3683        *self == other[..]
3684    }
3685}
3686
3687impl PartialEq<BytesMut> for String {
3688    fn eq(&self, other: &BytesMut) -> bool {
3689        *other == *self
3690    }
3691}
3692
3693impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
3694where
3695    BytesMut: PartialEq<T>,
3696{
3697    fn eq(&self, other: &&'a T) -> bool {
3698        *self == **other
3699    }
3700}
3701
3702impl PartialEq<BytesMut> for &[u8] {
3703    fn eq(&self, other: &BytesMut) -> bool {
3704        *other == *self
3705    }
3706}
3707
3708impl PartialEq<BytesMut> for &str {
3709    fn eq(&self, other: &BytesMut) -> bool {
3710        *other == *self
3711    }
3712}
3713
3714impl PartialEq<[u8]> for Bytes {
3715    fn eq(&self, other: &[u8]) -> bool {
3716        self.inner.as_ref() == other
3717    }
3718}
3719
3720impl PartialOrd<[u8]> for Bytes {
3721    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
3722        self.inner.as_ref().partial_cmp(other)
3723    }
3724}
3725
3726impl PartialEq<Bytes> for [u8] {
3727    fn eq(&self, other: &Bytes) -> bool {
3728        *other == *self
3729    }
3730}
3731
3732impl PartialOrd<Bytes> for [u8] {
3733    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3734        other.partial_cmp(self)
3735    }
3736}
3737
3738impl PartialEq<str> for Bytes {
3739    fn eq(&self, other: &str) -> bool {
3740        self.inner.as_ref() == other.as_bytes()
3741    }
3742}
3743
3744impl PartialOrd<str> for Bytes {
3745    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
3746        self.inner.as_ref().partial_cmp(other.as_bytes())
3747    }
3748}
3749
3750impl PartialEq<Bytes> for str {
3751    fn eq(&self, other: &Bytes) -> bool {
3752        *other == *self
3753    }
3754}
3755
3756impl PartialOrd<Bytes> for str {
3757    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3758        other.partial_cmp(self)
3759    }
3760}
3761
3762impl PartialEq<Vec<u8>> for Bytes {
3763    fn eq(&self, other: &Vec<u8>) -> bool {
3764        *self == other[..]
3765    }
3766}
3767
3768impl PartialOrd<Vec<u8>> for Bytes {
3769    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
3770        self.inner.as_ref().partial_cmp(&other[..])
3771    }
3772}
3773
3774impl PartialEq<Bytes> for Vec<u8> {
3775    fn eq(&self, other: &Bytes) -> bool {
3776        *other == *self
3777    }
3778}
3779
3780impl PartialOrd<Bytes> for Vec<u8> {
3781    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3782        other.partial_cmp(self)
3783    }
3784}
3785
3786impl PartialEq<String> for Bytes {
3787    fn eq(&self, other: &String) -> bool {
3788        *self == other[..]
3789    }
3790}
3791
3792impl PartialOrd<String> for Bytes {
3793    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
3794        self.inner.as_ref().partial_cmp(other.as_bytes())
3795    }
3796}
3797
3798impl PartialEq<Bytes> for String {
3799    fn eq(&self, other: &Bytes) -> bool {
3800        *other == *self
3801    }
3802}
3803
3804impl PartialOrd<Bytes> for String {
3805    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3806        other.partial_cmp(self)
3807    }
3808}
3809
3810impl PartialEq<Bytes> for &[u8] {
3811    fn eq(&self, other: &Bytes) -> bool {
3812        *other == *self
3813    }
3814}
3815
3816impl PartialOrd<Bytes> for &[u8] {
3817    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3818        other.partial_cmp(self)
3819    }
3820}
3821
3822impl PartialEq<Bytes> for &str {
3823    fn eq(&self, other: &Bytes) -> bool {
3824        *other == *self
3825    }
3826}
3827
3828impl PartialOrd<Bytes> for &str {
3829    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
3830        other.partial_cmp(self)
3831    }
3832}
3833
3834impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
3835where
3836    Bytes: PartialEq<T>,
3837{
3838    fn eq(&self, other: &&'a T) -> bool {
3839        *self == **other
3840    }
3841}
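// Editor's note: a brief usage sketch of the cross-type comparisons above
// (the literals are arbitrary):
//
//     let b = Bytes::from(&b"abc"[..]);
//     assert!(b == "abc"); // &str, via the blanket `&'a T` impl
//     assert!("abc" == b); // symmetric impl on `&str`
//     assert!(b == vec![b'a', b'b', b'c']); // `Vec<u8>` on the right-hand side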
3842
3843impl From<BytesVec> for Bytes {
3844    fn from(b: BytesVec) -> Self {
3845        b.freeze()
3846    }
3847}
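// Editor's note: usage sketch of the conversion above (freezing makes the
// buffer immutable and cheaply shareable):
//
//     let mut v = BytesVec::copy_from_slice(&b"ping"[..]);
//     v.put_u8(b'!');
//     let frozen: Bytes = v.into(); // goes through `BytesVec::freeze`
//     assert_eq!(frozen, "ping!");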
3848
3849impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
3850where
3851    Bytes: PartialOrd<T>,
3852{
3853    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
3854        self.partial_cmp(&**other)
3855    }
3856}
3857
3858impl PartialEq<BytesMut> for Bytes {
3859    fn eq(&self, other: &BytesMut) -> bool {
3860        other[..] == self[..]
3861    }
3862}
3863
3864impl PartialEq<BytesVec> for Bytes {
3865    fn eq(&self, other: &BytesVec) -> bool {
3866        other[..] == self[..]
3867    }
3868}
3869
3870impl PartialEq<Bytes> for BytesVec {
3871    fn eq(&self, other: &Bytes) -> bool {
3872        other[..] == self[..]
3873    }
3874}
3875
3876impl PartialEq<Bytes> for BytesMut {
3877    fn eq(&self, other: &Bytes) -> bool {
3878        other[..] == self[..]
3879    }
3880}
3881
3882impl PartialEq<BytesMut> for BytesVec {
3883    fn eq(&self, other: &BytesMut) -> bool {
3884        other[..] == self[..]
3885    }
3886}
3887
3888impl PartialEq<BytesVec> for BytesMut {
3889    fn eq(&self, other: &BytesVec) -> bool {
3890        other[..] == self[..]
3891    }
3892}
3893
3894impl PartialEq<[u8]> for BytesVec {
3895    fn eq(&self, other: &[u8]) -> bool {
3896        &**self == other
3897    }
3898}
3899
3900impl PartialEq<BytesVec> for [u8] {
3901    fn eq(&self, other: &BytesVec) -> bool {
3902        *other == *self
3903    }
3904}
3905
3906impl PartialEq<str> for BytesVec {
3907    fn eq(&self, other: &str) -> bool {
3908        &**self == other.as_bytes()
3909    }
3910}
3911
3912impl PartialEq<BytesVec> for str {
3913    fn eq(&self, other: &BytesVec) -> bool {
3914        *other == *self
3915    }
3916}
3917
3918impl PartialEq<Vec<u8>> for BytesVec {
3919    fn eq(&self, other: &Vec<u8>) -> bool {
3920        *self == other[..]
3921    }
3922}
3923
3924impl PartialEq<BytesVec> for Vec<u8> {
3925    fn eq(&self, other: &BytesVec) -> bool {
3926        *other == *self
3927    }
3928}
3929
3930impl PartialEq<String> for BytesVec {
3931    fn eq(&self, other: &String) -> bool {
3932        *self == other[..]
3933    }
3934}
3935
3936impl PartialEq<BytesVec> for String {
3937    fn eq(&self, other: &BytesVec) -> bool {
3938        *other == *self
3939    }
3940}
3941
3942impl<'a, T: ?Sized> PartialEq<&'a T> for BytesVec
3943where
3944    BytesVec: PartialEq<T>,
3945{
3946    fn eq(&self, other: &&'a T) -> bool {
3947        *self == **other
3948    }
3949}
3950
3951impl PartialEq<BytesVec> for &[u8] {
3952    fn eq(&self, other: &BytesVec) -> bool {
3953        *other == *self
3954    }
3955}
3956
3957impl PartialEq<BytesVec> for &str {
3958    fn eq(&self, other: &BytesVec) -> bool {
3959        *other == *self
3960    }
3961}
3962
3963// While there is `std::process::abort`, it's only available in Rust 1.17, and
3964// our minimum supported version is currently 1.15. So, this acts as an abort
3965// by triggering a double panic, which always aborts in Rust.
3966struct Abort;
3967
3968impl Drop for Abort {
3969    fn drop(&mut self) {
3970        panic!();
3971    }
3972}
3973
3974#[inline(never)]
3975#[cold]
3976fn abort() {
3977    let _a = Abort; // dropped while the panic below unwinds
3978    panic!(); // panicking again inside `Abort::drop` turns this into an abort
3979}
3980
3981#[cfg(test)]
3982mod tests {
3983    use std::convert::TryFrom;
3984
3985    use super::*;
3986
3987    const LONG: &[u8] =
3988        b"mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
3989        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
3990        mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb";
3991
3992    #[test]
3993    fn trimdown() {
3994        let mut b = Bytes::from(LONG.to_vec());
3995        assert_eq!(b.inner.capacity(), 263);
3996        unsafe { b.inner.set_len(68) };
3997        assert_eq!(b.len(), 68);
3998        assert_eq!(b.inner.capacity(), 263);
3999        b.trimdown();
4000        assert_eq!(b.inner.capacity(), 96);
4001
4002        unsafe { b.inner.set_len(16) };
4003        b.trimdown();
4004        assert!(b.is_inline());
4005    }
4006
4007    #[test]
4008    fn bytes() {
4009        let mut b = Bytes::from(LONG.to_vec());
4010        b.clear();
4011        assert!(b.is_inline());
4012        assert!(b.is_empty());
4013        assert!(b.len() == 0);
4014
4015        let b = Bytes::from(BytesMut::from(LONG));
4016        assert_eq!(b, LONG);
4017
4018        let b = BytesMut::try_from(b).unwrap();
4019        assert_eq!(b, LONG);
4020    }

4021    #[test]
4022    fn bytes_vec() {
4023        let bv = BytesVec::copy_from_slice(&LONG[..]);
4024        // SharedVec size is 32
4025        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>() * 9);
4026        assert_eq!(bv.len(), 263);
4027        assert_eq!(bv.as_ref().len(), 263);
4028        assert_eq!(bv.as_ref(), &LONG[..]);
4029
4030        let mut bv = BytesVec::copy_from_slice(&b"hello"[..]);
4031        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>());
4032        assert_eq!(bv.len(), 5);
4033        assert_eq!(bv.as_ref().len(), 5);
4034        assert_eq!(bv.as_ref()[0], b"h"[0]);
4035        bv.put_u8(b" "[0]);
4036        assert_eq!(bv.as_ref(), &b"hello "[..]);
4037        bv.put("world");
4038        assert_eq!(bv, "hello world");
4039
4040        let b = Bytes::from(bv);
4041        assert_eq!(b, "hello world");
4042
4043        let mut b = BytesMut::try_from(b).unwrap();
4044        b.put(".");
4045        assert_eq!(b, "hello world.");
4046    }
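
    // Editor's note: an extra sketch test (not in the original) for the
    // zero-copy slicing behavior documented on `Bytes`; public API only.
    #[test]
    fn slices_share_storage() {
        let mem = Bytes::from(LONG.to_vec());
        let a = mem.slice(0..4);
        let b = mem.slice(4..8);
        assert_eq!(&a[..], &LONG[0..4]);
        assert_eq!(&b[..], &LONG[4..8]);
        assert_eq!(mem.len(), LONG.len()); // the original view is unchanged
    }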
4047}