bytes_expand/bytes.rs
1use {IntoBuf, Buf, BufMut};
2use buf::Iter;
3use debug;
4
5use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize};
6use std::borrow::{Borrow, BorrowMut};
7use std::io::Cursor;
8use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
9use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
10use std::iter::{FromIterator, Iterator};
11
12/// A reference counted contiguous slice of memory.
13///
14/// `Bytes` is an efficient container for storing and operating on contiguous
15/// slices of memory. It is intended for use primarily in networking code, but
16/// could have applications elsewhere as well.
17///
18/// `Bytes` values facilitate zero-copy network programming by allowing multiple
19/// `Bytes` objects to point to the same underlying memory. This is managed by
20/// using a reference count to track when the memory is no longer needed and can
21/// be freed.
22///
23/// ```
24/// use bytes::Bytes;
25///
26/// let mut mem = Bytes::from(&b"Hello world"[..]);
27/// let a = mem.slice(0, 5);
28///
29/// assert_eq!(&a[..], b"Hello");
30///
31/// let b = mem.split_to(6);
32///
33/// assert_eq!(&mem[..], b"world");
34/// assert_eq!(&b[..], b"Hello ");
35/// ```
36///
37/// # Memory layout
38///
/// The `Bytes` struct itself is fairly small, limited to 4 pointer-sized fields
/// that track which segment of the underlying memory the `Bytes` handle has
/// access to.
42///
43/// The memory layout looks like this:
44///
45/// ```text
46/// +-------+
47/// | Bytes |
48/// +-------+
49/// / \_____
50/// | \
51/// v v
52/// +-----+------------------------------------+
53/// | Arc | | Data | |
54/// +-----+------------------------------------+
55/// ```
56///
57/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
58/// slice and a pointer to the start of the region visible by the handle.
59/// `Bytes` also tracks the length of its view into the memory.
60///
61/// # Sharing
62///
63/// The memory itself is reference counted, and multiple `Bytes` objects may
/// point to the same region. Each `Bytes` handle may point to a different section
/// within the memory region, and the handles may or may not have overlapping views
/// into the memory.
67///
68///
69/// ```text
70///
71/// Arc ptrs +---------+
72/// ________________________ / | Bytes 2 |
73/// / +---------+
74/// / +-----------+ | |
75/// |_________/ | Bytes 1 | | |
76/// | +-----------+ | |
77/// | | | ___/ data | tail
78/// | data | tail |/ |
79/// v v v v
80/// +-----+---------------------------------+-----+
81/// | Arc | | | | |
82/// +-----+---------------------------------+-----+
83/// ```
84///
85/// # Mutating
86///
87/// While `Bytes` handles may potentially represent overlapping views of the
88/// underlying memory slice and may not be mutated, `BytesMut` handles are
89/// guaranteed to be the only handle able to view that slice of memory. As such,
90/// `BytesMut` handles are able to mutate the underlying memory. Note that
91/// holding a unique view to a region of memory does not mean that there are no
92/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
93/// memory.
94///
95/// # Inline bytes
96///
97/// As an optimization, when the slice referenced by a `Bytes` or `BytesMut`
98/// handle is small enough [^1], `with_capacity` will avoid the allocation by
99/// inlining the slice directly in the handle. In this case, a clone is no
100/// longer "shallow" and the data will be copied. Converting from a `Vec` will
101/// never use inlining.
102///
103/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
104///
105pub struct Bytes {
106 inner: Inner,
107}
108
109/// A unique reference to a contiguous slice of memory.
110///
111/// `BytesMut` represents a unique view into a potentially shared memory region.
112/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
/// allocations.
115///
116/// For more detail, see [Bytes](struct.Bytes.html).
117///
118/// # Growth
119///
/// One key difference from `Vec<u8>` is that most operations in the upstream
/// `bytes` crate **do not implicitly grow the buffer**. In this fork, however, the
/// `put`, `put_slice`, and `put_u8` implementations grow the buffer automatically,
/// reserving at least `auto_expand_size` bytes at a time, instead of panicking
/// when the remaining capacity runs out. Capacity can still be inspected and
/// managed explicitly with `remaining_mut()` and `reserve()`.
///
/// `extend` likewise implicitly reserves the required capacity.
128///
129/// # Examples
130///
131/// ```
132/// use bytes::{BytesMut, BufMut};
133///
/// let mut buf = BytesMut::with_capacity(64, 64);
135///
136/// buf.put(b'h');
137/// buf.put(b'e');
138/// buf.put("llo");
139///
140/// assert_eq!(&buf[..], b"hello");
141///
142/// // Freeze the buffer so that it can be shared
143/// let a = buf.freeze();
144///
145/// // This does not allocate, instead `b` points to the same memory.
146/// let b = a.clone();
147///
148/// assert_eq!(&a[..], b"hello");
149/// assert_eq!(&b[..], b"hello");
150/// ```
151pub struct BytesMut {
152 inner: Inner,
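    // Minimum amount reserved by `auto_expand` when a `BufMut` write finds
    // insufficient remaining capacity (auto-expand extension; see `auto_expand`
    // below).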
153 auto_expand_size: usize,
154}
155
156// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
157// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
158// that mutate the underlying buffer are only performed when the data range
159// being mutated is only available via a single `BytesMut` handle.
160//
161// # Data storage modes
162//
163// The goal of `bytes` is to be as efficient as possible across a wide range of
164// potential usage patterns. As such, `bytes` needs to be able to handle buffers
165// that are never shared, shared on a single thread, and shared across many
166// threads. `bytes` also needs to handle both tiny buffers as well as very large
167// buffers. For example, [Cassandra](http://cassandra.apache.org) values have
// been known to be in the hundreds of megabytes, and HTTP header values can be a
169// few characters in size.
170//
171// To achieve high performance in these various situations, `Bytes` and
172// `BytesMut` use different strategies for storing the buffer depending on the
173// usage pattern.
174//
175// ## Delayed `Arc` allocation
176//
177// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
178// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
179// not used and the buffer is backed by a `Vec<u8>` directly. Using an
180// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
181// shared, that allocation is avoided.
182//
183// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
184// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
185// into an `Arc` and both the original handle and the new handle use the same
186// buffer via the `Arc`.
187//
188// * `Arc` is being used to signify an atomically reference counted cell. We
189// don't use the `Arc` implementation provided by `std` and instead use our own.
190// This ends up simplifying a number of the `unsafe` code snippets.
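//
// As a rough sketch of the promotion, using this crate's public API:
//
//     let mut buf = BytesMut::with_capacity(1024, 0); // backed by a plain Vec, no Arc yet
//     buf.put_slice(b"hello");
//     let a = buf.freeze();                           // still Vec-backed
//     let b = a.clone();                              // storage is now promoted to an Arc
//     drop((a, b));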
191//
192// ## Inlining small buffers
193//
194// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
195// systems, this ends up being 32 bytes, which is actually a lot of storage for
196// cases where `Bytes` is being used to represent small byte strings, such as
197// HTTP header names and values.
198//
199// To avoid any allocation at all in these cases, `Bytes` will use the struct
// itself for storing the buffer, reserving 1 byte for metadata. This means
201// that, on 64 bit systems, 31 byte buffers require no allocation at all.
202//
// The byte used for metadata stores a 2-bit flag indicating that the buffer is
// stored inline, as well as 6 bits for tracking the buffer length (the
205// return value of `Bytes::len`).
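//
// For example, an inline buffer currently holding 5 bytes stores the byte
// `(5 << INLINE_LEN_OFFSET) | KIND_INLINE`, i.e. `0b0001_0101`, in that
// metadata position.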
206//
207// ## Static buffers
208//
209// `Bytes` can also represent a static buffer, which is created with
210// `Bytes::from_static`. No copying or allocations are required for tracking
211// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
212// tracking that the `Bytes` instance represents a static buffer is stored in
213// the `Bytes` struct.
214//
215// # Struct layout
216//
217// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
218// data fields as well as all of the function implementations.
219//
220// The `Inner` struct is carefully laid out in order to support the
221// functionality described above as well as being as small as possible. Size is
222// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
223// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
224// map structure.
225//
226// The `Inner` struct contains the following fields:
227//
228// * `ptr: *mut u8`
229// * `len: usize`
230// * `cap: usize`
231// * `arc: AtomicPtr<Shared>`
232//
233// ## `ptr: *mut u8`
234//
235// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
236// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
237// may have been shifted to point somewhere inside the buffer.
238//
239// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
240//
241// ## `len: usize`
242//
243// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
244// always the `Vec`'s length. The slice represented by `ptr` and `len` should
245// (ideally) always be initialized memory.
246//
247// When in "inlined" mode, `len` is used as part of the inlined buffer.
248//
249// ## `cap: usize`
250//
251// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
252// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
253// may or may not be initialized memory.
254//
255// When in "inlined" mode, `cap` is used as part of the inlined buffer.
256//
257// ## `arc: AtomicPtr<Shared>`
258//
259// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
260// will be the pointer to the `Arc` structure tracking the ref count for the
261// underlying buffer. When the pointer is null, then the `Arc` has not been
262// allocated yet and `self` is the only outstanding handle for the underlying
263// buffer.
264//
265// The lower two bits of `arc` are used to track the storage mode of `Inner`.
266// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
267// indicates vector storage, not yet promoted to Arc. Since pointers to
268// allocated structures are aligned, the lower two bits of a pointer will always
269// be 0. This allows disambiguating between a pointer and the two flags.
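//
// Roughly, the storage mode is recovered like this:
//
//     match arc_word & KIND_MASK {
//         KIND_INLINE => { /* inline storage */ }
//         KIND_STATIC => { /* &'static [u8] */ }
//         KIND_VEC    => { /* Vec<u8>, not yet promoted */ }
//         _           => { /* KIND_ARC: `arc_word` is a real `*mut Shared` */ }
//     }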
270//
271// When in "inlined" mode, the least significant byte of `arc` is also used to
272// store the length of the buffer view (vs. the capacity, which is a constant).
273//
274// The rest of `arc`'s bytes are used as part of the inline buffer, which means
275// that those bytes need to be located next to the `ptr`, `len`, and `cap`
276// fields, which make up the rest of the inline buffer. This requires special
277// casing the layout of `Inner` depending on if the target platform is big or
278// little endian.
279//
280// On little endian platforms, the `arc` field must be the first field in the
281// struct. On big endian platforms, the `arc` field must be the last field in
282// the struct. Since a deterministic struct layout is required, `Inner` is
283// annotated with `#[repr(C)]`.
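//
// As a sanity check, `Inner` is exactly four pointer-sized words:
//
//     assert_eq!(std::mem::size_of::<Inner>(), 4 * std::mem::size_of::<usize>());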
284//
285// # Thread safety
286//
287// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
288// by bumping the buffer ref count and returning a new struct pointing to the
289// same buffer. However, the `Arc` structure is lazily allocated. This means
290// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
291// function can be called concurrently from multiple threads. This is why an
292// `AtomicPtr` is used for the `arc` field vs. a `*const`.
293//
294// Care is taken to ensure that the need for synchronization is minimized. Most
295// operations do not require any synchronization.
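//
// For example (a sketch), two threads may clone the same `Bytes` value
// concurrently, racing to allocate the shared `Arc`, and this is still sound:
//
//     use std::{sync::Arc, thread};
//
//     let shared = Arc::new(Bytes::from(&b"some buffer large enough to not be inlined..."[..]));
//     let other = Arc::clone(&shared);
//     let t = thread::spawn(move || Bytes::clone(&other));
//     let _local = Bytes::clone(&shared);
//     t.join().unwrap();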
296//
297#[cfg(target_endian = "little")]
298#[repr(C)]
299struct Inner {
300 // WARNING: Do not access the fields directly unless you know what you are
301 // doing. Instead, use the fns. See implementation comment above.
302 arc: AtomicPtr<Shared>,
303 ptr: *mut u8,
304 len: usize,
305 cap: usize,
306}
307
308#[cfg(target_endian = "big")]
309#[repr(C)]
310struct Inner {
311 // WARNING: Do not access the fields directly unless you know what you are
312 // doing. Instead, use the fns. See implementation comment above.
313 ptr: *mut u8,
314 len: usize,
315 cap: usize,
316 arc: AtomicPtr<Shared>,
317}
318
// Thread-safe reference-counted container for the shared storage. This is mostly
// the same as `std::sync::Arc` but without the weak counter. The ref counting
321// fns are based on the ones found in `std`.
322//
323// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
324// up making the overall code simpler and easier to reason about. This is due to
325// some of the logic around setting `Inner::arc` and other ways the `arc` field
326// is used. Using `Arc` ended up requiring a number of funky transmutes and
327// other shenanigans to make it work.
328struct Shared {
329 vec: Vec<u8>,
330 original_capacity_repr: usize,
331 ref_count: AtomicUsize,
332}
333
334// Buffer storage strategy flags.
335const KIND_ARC: usize = 0b00;
336const KIND_INLINE: usize = 0b01;
337const KIND_STATIC: usize = 0b10;
338const KIND_VEC: usize = 0b11;
339const KIND_MASK: usize = 0b11;
340
341// The max original capacity value. Any `Bytes` allocated with a greater initial
342// capacity will default to this.
343const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
344// The original capacity algorithm will not take effect unless the originally
345// allocated capacity was at least 1kb in size.
346const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
347// The original capacity is stored in powers of 2 starting at 1kb to a max of
348// 64kb. Representing it as such requires only 3 bits of storage.
349const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
350const ORIGINAL_CAPACITY_OFFSET: usize = 2;
351
// When the storage is in the `Vec` representation, the pointer can be advanced
// by at most this value. This is because the storage available to track the
// offset is a `usize` minus the number of KIND bits and the number of
// ORIGINAL_CAPACITY bits.
356const VEC_POS_OFFSET: usize = 5;
357const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
358const NOT_VEC_POS_MASK: usize = 0b11111;
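// In other words, while in the `Vec` representation the `arc` word is laid out
// roughly as `(vec_pos << VEC_POS_OFFSET) | (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC`.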
359
360// Bit op constants for extracting the inline length value from the `arc` field.
361const INLINE_LEN_MASK: usize = 0b11111100;
362const INLINE_LEN_OFFSET: usize = 2;
363
364// Byte offset from the start of `Inner` to where the inline buffer data
365// starts. On little endian platforms, the first byte of the struct is the
366// storage flag, so the data is shifted by a byte. On big endian systems, the
367// data starts at the beginning of the struct.
368#[cfg(target_endian = "little")]
369const INLINE_DATA_OFFSET: isize = 1;
370#[cfg(target_endian = "big")]
371const INLINE_DATA_OFFSET: isize = 0;
372
373#[cfg(target_pointer_width = "64")]
374const PTR_WIDTH: usize = 64;
375#[cfg(target_pointer_width = "32")]
376const PTR_WIDTH: usize = 32;
377
378// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the
379// metadata.
380#[cfg(target_pointer_width = "64")]
381const INLINE_CAP: usize = 4 * 8 - 1;
382#[cfg(target_pointer_width = "32")]
383const INLINE_CAP: usize = 4 * 4 - 1;
384
385/*
386 *
387 * ===== Bytes =====
388 *
389 */
390
391impl Bytes {
392 /// Creates a new `Bytes` with the specified capacity.
393 ///
394 /// The returned `Bytes` will be able to hold at least `capacity` bytes
    /// without reallocating. If `capacity` does not exceed `4 * size_of::<usize>() - 1`,
    /// then `Bytes` will not allocate.
397 ///
398 /// It is important to note that this function does not specify the length
399 /// of the returned `Bytes`, but only the capacity.
400 ///
401 /// # Examples
402 ///
403 /// ```
404 /// use bytes::Bytes;
405 ///
406 /// let mut bytes = Bytes::with_capacity(64);
407 ///
408 /// // `bytes` contains no data, even though there is capacity
409 /// assert_eq!(bytes.len(), 0);
410 ///
411 /// bytes.extend_from_slice(&b"hello world"[..]);
412 ///
413 /// assert_eq!(&bytes[..], b"hello world");
414 /// ```
415 #[inline]
416 pub fn with_capacity(capacity: usize) -> Bytes {
417 Bytes {
418 inner: Inner::with_capacity(capacity),
419 }
420 }
421
422 /// Creates a new empty `Bytes`.
423 ///
424 /// This will not allocate and the returned `Bytes` handle will be empty.
425 ///
426 /// # Examples
427 ///
428 /// ```
429 /// use bytes::Bytes;
430 ///
431 /// let b = Bytes::new();
432 /// assert_eq!(&b[..], b"");
433 /// ```
434 #[inline]
435 pub fn new() -> Bytes {
436 Bytes::with_capacity(0)
437 }
438
439 /// Creates a new `Bytes` from a static slice.
440 ///
441 /// The returned `Bytes` will point directly to the static slice. There is
442 /// no allocating or copying.
443 ///
444 /// # Examples
445 ///
446 /// ```
447 /// use bytes::Bytes;
448 ///
449 /// let b = Bytes::from_static(b"hello");
450 /// assert_eq!(&b[..], b"hello");
451 /// ```
452 #[inline]
453 pub fn from_static(bytes: &'static [u8]) -> Bytes {
454 Bytes {
455 inner: Inner::from_static(bytes),
456 }
457 }
458
459 /// Returns the number of bytes contained in this `Bytes`.
460 ///
461 /// # Examples
462 ///
463 /// ```
464 /// use bytes::Bytes;
465 ///
466 /// let b = Bytes::from(&b"hello"[..]);
467 /// assert_eq!(b.len(), 5);
468 /// ```
469 #[inline]
470 pub fn len(&self) -> usize {
471 self.inner.len()
472 }
473
474 /// Returns true if the `Bytes` has a length of 0.
475 ///
476 /// # Examples
477 ///
478 /// ```
479 /// use bytes::Bytes;
480 ///
481 /// let b = Bytes::new();
482 /// assert!(b.is_empty());
483 /// ```
484 #[inline]
485 pub fn is_empty(&self) -> bool {
486 self.inner.is_empty()
487 }
488
489 /// Returns a slice of self for the index range `[begin..end)`.
490 ///
491 /// This will increment the reference count for the underlying memory and
492 /// return a new `Bytes` handle set to the slice.
493 ///
494 /// This operation is `O(1)`.
495 ///
496 /// # Examples
497 ///
498 /// ```
499 /// use bytes::Bytes;
500 ///
501 /// let a = Bytes::from(&b"hello world"[..]);
502 /// let b = a.slice(2, 5);
503 ///
504 /// assert_eq!(&b[..], b"llo");
505 /// ```
506 ///
507 /// # Panics
508 ///
509 /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
510 /// will panic.
511 pub fn slice(&self, begin: usize, end: usize) -> Bytes {
512 assert!(begin <= end);
513 assert!(end <= self.len());
514
515 if end - begin <= INLINE_CAP {
516 return Bytes::from(&self[begin..end]);
517 }
518
519 let mut ret = self.clone();
520
521 unsafe {
522 ret.inner.set_end(end);
523 ret.inner.set_start(begin);
524 }
525
526 ret
527 }
528
529 /// Returns a slice of self for the index range `[begin..self.len())`.
530 ///
531 /// This will increment the reference count for the underlying memory and
532 /// return a new `Bytes` handle set to the slice.
533 ///
534 /// This operation is `O(1)` and is equivalent to `self.slice(begin,
535 /// self.len())`.
536 ///
537 /// # Examples
538 ///
539 /// ```
540 /// use bytes::Bytes;
541 ///
542 /// let a = Bytes::from(&b"hello world"[..]);
543 /// let b = a.slice_from(6);
544 ///
545 /// assert_eq!(&b[..], b"world");
546 /// ```
547 ///
548 /// # Panics
549 ///
550 /// Requires that `begin <= self.len()`, otherwise slicing will panic.
551 pub fn slice_from(&self, begin: usize) -> Bytes {
552 self.slice(begin, self.len())
553 }
554
555 /// Returns a slice of self for the index range `[0..end)`.
556 ///
557 /// This will increment the reference count for the underlying memory and
558 /// return a new `Bytes` handle set to the slice.
559 ///
560 /// This operation is `O(1)` and is equivalent to `self.slice(0, end)`.
561 ///
562 /// # Examples
563 ///
564 /// ```
565 /// use bytes::Bytes;
566 ///
567 /// let a = Bytes::from(&b"hello world"[..]);
568 /// let b = a.slice_to(5);
569 ///
570 /// assert_eq!(&b[..], b"hello");
571 /// ```
572 ///
573 /// # Panics
574 ///
575 /// Requires that `end <= self.len()`, otherwise slicing will panic.
576 pub fn slice_to(&self, end: usize) -> Bytes {
577 self.slice(0, end)
578 }
579
580 /// Returns a slice of self that is equivalent to the given `subset`.
581 ///
582 /// When processing a `Bytes` buffer with other tools, one often gets a
583 /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it.
584 /// This function turns that `&[u8]` into another `Bytes`, as if one had
585 /// called `self.slice()` with the offsets that correspond to `subset`.
586 ///
587 /// This operation is `O(1)`.
588 ///
589 /// # Examples
590 ///
591 /// ```
592 /// use bytes::Bytes;
593 ///
594 /// let bytes = Bytes::from(&b"012345678"[..]);
595 /// let as_slice = bytes.as_ref();
596 /// let subset = &as_slice[2..6];
597 /// let subslice = bytes.slice_ref(&subset);
598 /// assert_eq!(&subslice[..], b"2345");
599 /// ```
600 ///
601 /// # Panics
602 ///
    /// Requires that the given `subset` slice is in fact contained within the
    /// `Bytes` buffer; otherwise this function will panic.
605 pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
606 let bytes_p = self.as_ptr() as usize;
607 let bytes_len = self.len();
608
609 let sub_p = subset.as_ptr() as usize;
610 let sub_len = subset.len();
611
612 assert!(sub_p >= bytes_p);
613 assert!(sub_p + sub_len <= bytes_p + bytes_len);
614
615 let sub_offset = sub_p - bytes_p;
616
617 self.slice(sub_offset, sub_offset + sub_len)
618 }
619
620 /// Splits the bytes into two at the given index.
621 ///
622 /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
623 /// contains elements `[at, len)`.
624 ///
625 /// This is an `O(1)` operation that just increases the reference count and
626 /// sets a few indices.
627 ///
628 /// # Examples
629 ///
630 /// ```
631 /// use bytes::Bytes;
632 ///
633 /// let mut a = Bytes::from(&b"hello world"[..]);
634 /// let b = a.split_off(5);
635 ///
636 /// assert_eq!(&a[..], b"hello");
637 /// assert_eq!(&b[..], b" world");
638 /// ```
639 ///
640 /// # Panics
641 ///
642 /// Panics if `at > len`.
643 pub fn split_off(&mut self, at: usize) -> Bytes {
644 assert!(at <= self.len());
645
646 if at == self.len() {
647 return Bytes::new();
648 }
649
650 if at == 0 {
651 return mem::replace(self, Bytes::new());
652 }
653
654 Bytes {
655 inner: self.inner.split_off(at),
656 }
657 }
658
659 /// Splits the bytes into two at the given index.
660 ///
661 /// Afterwards `self` contains elements `[at, len)`, and the returned
662 /// `Bytes` contains elements `[0, at)`.
663 ///
664 /// This is an `O(1)` operation that just increases the reference count and
665 /// sets a few indices.
666 ///
667 /// # Examples
668 ///
669 /// ```
670 /// use bytes::Bytes;
671 ///
672 /// let mut a = Bytes::from(&b"hello world"[..]);
673 /// let b = a.split_to(5);
674 ///
675 /// assert_eq!(&a[..], b" world");
676 /// assert_eq!(&b[..], b"hello");
677 /// ```
678 ///
679 /// # Panics
680 ///
681 /// Panics if `at > len`.
682 pub fn split_to(&mut self, at: usize) -> Bytes {
683 assert!(at <= self.len());
684
685 if at == self.len() {
686 return mem::replace(self, Bytes::new());
687 }
688
689 if at == 0 {
690 return Bytes::new();
691 }
692
693 Bytes {
694 inner: self.inner.split_to(at),
695 }
696 }
697
698 #[deprecated(since = "0.4.1", note = "use split_to instead")]
699 #[doc(hidden)]
700 pub fn drain_to(&mut self, at: usize) -> Bytes {
701 self.split_to(at)
702 }
703
704 /// Shortens the buffer, keeping the first `len` bytes and dropping the
705 /// rest.
706 ///
707 /// If `len` is greater than the buffer's current length, this has no
708 /// effect.
709 ///
710 /// The [`split_off`] method can emulate `truncate`, but this causes the
711 /// excess bytes to be returned instead of dropped.
712 ///
713 /// # Examples
714 ///
715 /// ```
716 /// use bytes::Bytes;
717 ///
718 /// let mut buf = Bytes::from(&b"hello world"[..]);
719 /// buf.truncate(5);
720 /// assert_eq!(buf, b"hello"[..]);
721 /// ```
722 ///
723 /// [`split_off`]: #method.split_off
724 pub fn truncate(&mut self, len: usize) {
725 self.inner.truncate(len);
726 }
727
728 /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
729 /// rest.
730 ///
731 /// This is the same function as `Buf::advance`, and in the next breaking
732 /// release of `bytes`, this implementation will be removed in favor of
733 /// having `Bytes` implement `Buf`.
734 ///
735 /// # Panics
736 ///
737 /// This function panics if `cnt` is greater than `self.len()`
738 #[inline]
739 pub fn advance(&mut self, cnt: usize) {
740 assert!(cnt <= self.len(), "cannot advance past `remaining`");
741 unsafe { self.inner.set_start(cnt); }
742 }
743
744 /// Clears the buffer, removing all data.
745 ///
746 /// # Examples
747 ///
748 /// ```
749 /// use bytes::Bytes;
750 ///
751 /// let mut buf = Bytes::from(&b"hello world"[..]);
752 /// buf.clear();
753 /// assert!(buf.is_empty());
754 /// ```
755 pub fn clear(&mut self) {
756 self.truncate(0);
757 }
758
759 /// Attempts to convert into a `BytesMut` handle.
760 ///
761 /// This will only succeed if there are no other outstanding references to
762 /// the underlying chunk of memory. `Bytes` handles that contain inlined
    /// bytes will always be convertible to `BytesMut`.
764 ///
765 /// # Examples
766 ///
767 /// ```
768 /// use bytes::Bytes;
769 ///
770 /// let a = Bytes::from(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
771 ///
772 /// // Create a shallow clone
773 /// let b = a.clone();
774 ///
775 /// // This will fail because `b` shares a reference with `a`
776 /// let a = a.try_mut().unwrap_err();
777 ///
778 /// drop(b);
779 ///
780 /// // This will succeed
781 /// let mut a = a.try_mut().unwrap();
782 ///
783 /// a[0] = b'b';
784 ///
785 /// assert_eq!(&a[..4], b"bary");
786 /// ```
787 pub fn try_mut(mut self) -> Result<BytesMut, Bytes> {
788 if self.inner.is_mut_safe() {
789 Ok(BytesMut { inner: self.inner, auto_expand_size: crate::DEFAULT_AUTO_EXPAND_SIZE })
790 } else {
791 Err(self)
792 }
793 }
794
795 /// Appends given bytes to this object.
796 ///
    /// If this `Bytes` object does not have enough capacity, it is resized first.
    /// If it is shared (`refcount > 1`), it is copied first.
    ///
    /// This operation can be less efficient than the equivalent operation on
    /// `BytesMut`, especially for small additions.
802 ///
803 /// # Examples
804 ///
805 /// ```
806 /// use bytes::Bytes;
807 ///
808 /// let mut buf = Bytes::from("aabb");
809 /// buf.extend_from_slice(b"ccdd");
810 /// buf.extend_from_slice(b"eeff");
811 ///
812 /// assert_eq!(b"aabbccddeeff", &buf[..]);
813 /// ```
814 pub fn extend_from_slice(&mut self, extend: &[u8]) {
815 if extend.is_empty() {
816 return;
817 }
818
819 let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow");
820
821 let result = match mem::replace(self, Bytes::new()).try_mut() {
822 Ok(mut bytes_mut) => {
823 bytes_mut.extend_from_slice(extend);
824 bytes_mut
825 },
826 Err(bytes) => {
827 let mut bytes_mut = BytesMut::with_capacity(new_cap, crate::DEFAULT_AUTO_EXPAND_SIZE);
828 bytes_mut.put_slice(&bytes);
829 bytes_mut.put_slice(extend);
830 bytes_mut
831 }
832 };
833
834 mem::replace(self, result.freeze());
835 }
836}
837
838impl IntoBuf for Bytes {
839 type Buf = Cursor<Self>;
840
841 fn into_buf(self) -> Self::Buf {
842 Cursor::new(self)
843 }
844}
845
846impl<'a> IntoBuf for &'a Bytes {
847 type Buf = Cursor<Self>;
848
849 fn into_buf(self) -> Self::Buf {
850 Cursor::new(self)
851 }
852}
853
854impl Clone for Bytes {
855 fn clone(&self) -> Bytes {
856 Bytes {
857 inner: unsafe { self.inner.shallow_clone(false) },
858 }
859 }
860}
861
862impl AsRef<[u8]> for Bytes {
863 #[inline]
864 fn as_ref(&self) -> &[u8] {
865 self.inner.as_ref()
866 }
867}
868
869impl ops::Deref for Bytes {
870 type Target = [u8];
871
872 #[inline]
873 fn deref(&self) -> &[u8] {
874 self.inner.as_ref()
875 }
876}
877
878impl From<BytesMut> for Bytes {
879 fn from(src: BytesMut) -> Bytes {
880 src.freeze()
881 }
882}
883
884impl From<Vec<u8>> for Bytes {
885 fn from(src: Vec<u8>) -> Bytes {
886 BytesMut::from(src).freeze()
887 }
888}
889
890impl From<String> for Bytes {
891 fn from(src: String) -> Bytes {
892 BytesMut::from(src).freeze()
893 }
894}
895
896impl<'a> From<&'a [u8]> for Bytes {
897 fn from(src: &'a [u8]) -> Bytes {
898 BytesMut::from(src).freeze()
899 }
900}
901
902impl<'a> From<&'a str> for Bytes {
903 fn from(src: &'a str) -> Bytes {
904 BytesMut::from(src).freeze()
905 }
906}
907
908impl FromIterator<u8> for BytesMut {
909 fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
910 let iter = into_iter.into_iter();
911 let (min, maybe_max) = iter.size_hint();
912
913 let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min), crate::DEFAULT_AUTO_EXPAND_SIZE);
914
915 for i in iter {
916 out.reserve(1);
917 out.put(i);
918 }
919
920 out
921 }
922}
923
924impl FromIterator<u8> for Bytes {
925 fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
926 BytesMut::from_iter(into_iter).freeze()
927 }
928}
929
930impl<'a> FromIterator<&'a u8> for BytesMut {
931 fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
932 BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
933 }
934}
935
936impl<'a> FromIterator<&'a u8> for Bytes {
937 fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
938 BytesMut::from_iter(into_iter).freeze()
939 }
940}
941
942impl PartialEq for Bytes {
943 fn eq(&self, other: &Bytes) -> bool {
944 self.inner.as_ref() == other.inner.as_ref()
945 }
946}
947
948impl PartialOrd for Bytes {
949 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
950 self.inner.as_ref().partial_cmp(other.inner.as_ref())
951 }
952}
953
954impl Ord for Bytes {
955 fn cmp(&self, other: &Bytes) -> cmp::Ordering {
956 self.inner.as_ref().cmp(other.inner.as_ref())
957 }
958}
959
960impl Eq for Bytes {
961}
962
963impl Default for Bytes {
964 #[inline]
965 fn default() -> Bytes {
966 Bytes::new()
967 }
968}
969
970impl fmt::Debug for Bytes {
971 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
972 fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
973 }
974}
975
976impl hash::Hash for Bytes {
977 fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
978 let s: &[u8] = self.as_ref();
979 s.hash(state);
980 }
981}
982
983impl Borrow<[u8]> for Bytes {
984 fn borrow(&self) -> &[u8] {
985 self.as_ref()
986 }
987}
988
989impl IntoIterator for Bytes {
990 type Item = u8;
991 type IntoIter = Iter<Cursor<Bytes>>;
992
993 fn into_iter(self) -> Self::IntoIter {
994 self.into_buf().iter()
995 }
996}
997
998impl<'a> IntoIterator for &'a Bytes {
999 type Item = u8;
1000 type IntoIter = Iter<Cursor<&'a Bytes>>;
1001
1002 fn into_iter(self) -> Self::IntoIter {
1003 self.into_buf().iter()
1004 }
1005}
1006
1007impl Extend<u8> for Bytes {
1008 fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
1009 let iter = iter.into_iter();
1010
1011 let (lower, upper) = iter.size_hint();
1012
1013 // Avoid possible conversion into mut if there's nothing to add
1014 if let Some(0) = upper {
1015 return;
1016 }
1017
1018 let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
1019 Ok(bytes_mut) => bytes_mut,
1020 Err(bytes) => {
1021 let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower, crate::DEFAULT_AUTO_EXPAND_SIZE);
1022 bytes_mut.put_slice(&bytes);
1023 bytes_mut
1024 }
1025 };
1026
1027 bytes_mut.extend(iter);
1028
1029 mem::replace(self, bytes_mut.freeze());
1030 }
1031}
1032
1033impl<'a> Extend<&'a u8> for Bytes {
1034 fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
1035 self.extend(iter.into_iter().map(|b| *b))
1036 }
1037}
1038
1039/*
1040 *
1041 * ===== BytesMut =====
1042 *
1043 */
1044
1045impl BytesMut {
1046 /// Creates a new `BytesMut` with the specified capacity.
1047 ///
1048 /// The returned `BytesMut` will be able to hold at least `capacity` bytes
1049 /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
1050 /// then `BytesMut` will not allocate.
1051 ///
1052 /// It is important to note that this function does not specify the length
1053 /// of the returned `BytesMut`, but only the capacity.
1054 ///
1055 /// # Examples
1056 ///
1057 /// ```
1058 /// use bytes::{BytesMut, BufMut};
1059 ///
    /// let mut bytes = BytesMut::with_capacity(64, 64);
1061 ///
1062 /// // `bytes` contains no data, even though there is capacity
1063 /// assert_eq!(bytes.len(), 0);
1064 ///
1065 /// bytes.put(&b"hello world"[..]);
1066 ///
1067 /// assert_eq!(&bytes[..], b"hello world");
1068 /// ```
1069 #[inline]
1070 pub fn with_capacity(capacity: usize, auto_expand_size: usize) -> BytesMut {
1071 BytesMut {
1072 inner: Inner::with_capacity(capacity),
1073 auto_expand_size
1074 }
1075 }
1076
1077 /// Creates a new `BytesMut` with default capacity.
1078 ///
1079 /// Resulting object has length 0 and unspecified capacity.
1080 /// This function does not allocate.
1081 ///
1082 /// # Examples
1083 ///
1084 /// ```
1085 /// use bytes::{BytesMut, BufMut};
1086 ///
1087 /// let mut bytes = BytesMut::new();
1088 ///
1089 /// assert_eq!(0, bytes.len());
1090 ///
1091 /// bytes.reserve(2);
1092 /// bytes.put_slice(b"xy");
1093 ///
1094 /// assert_eq!(&b"xy"[..], &bytes[..]);
1095 /// ```
1096 #[inline]
1097 pub fn new() -> BytesMut {
1098 BytesMut::with_capacity(0, crate::DEFAULT_AUTO_EXPAND_SIZE)
1099 }
1100
1101 /// Returns the number of bytes contained in this `BytesMut`.
1102 ///
1103 /// # Examples
1104 ///
1105 /// ```
1106 /// use bytes::BytesMut;
1107 ///
1108 /// let b = BytesMut::from(&b"hello"[..]);
1109 /// assert_eq!(b.len(), 5);
1110 /// ```
1111 #[inline]
1112 pub fn len(&self) -> usize {
1113 self.inner.len()
1114 }
1115
1116 /// Returns true if the `BytesMut` has a length of 0.
1117 ///
1118 /// # Examples
1119 ///
1120 /// ```
1121 /// use bytes::BytesMut;
1122 ///
    /// let b = BytesMut::with_capacity(64, 64);
1124 /// assert!(b.is_empty());
1125 /// ```
1126 #[inline]
1127 pub fn is_empty(&self) -> bool {
1128 self.len() == 0
1129 }
1130
1131 /// Returns the number of bytes the `BytesMut` can hold without reallocating.
1132 ///
1133 /// # Examples
1134 ///
1135 /// ```
1136 /// use bytes::BytesMut;
1137 ///
    /// let b = BytesMut::with_capacity(64, 64);
1139 /// assert_eq!(b.capacity(), 64);
1140 /// ```
1141 #[inline]
1142 pub fn capacity(&self) -> usize {
1143 self.inner.capacity()
1144 }
1145
1146 /// Converts `self` into an immutable `Bytes`.
1147 ///
1148 /// The conversion is zero cost and is used to indicate that the slice
1149 /// referenced by the handle will no longer be mutated. Once the conversion
1150 /// is done, the handle can be cloned and shared across threads.
1151 ///
1152 /// # Examples
1153 ///
1154 /// ```
1155 /// use bytes::{BytesMut, BufMut};
1156 /// use std::thread;
1157 ///
    /// let mut b = BytesMut::with_capacity(64, 64);
1159 /// b.put("hello world");
1160 /// let b1 = b.freeze();
1161 /// let b2 = b1.clone();
1162 ///
1163 /// let th = thread::spawn(move || {
1164 /// assert_eq!(&b1[..], b"hello world");
1165 /// });
1166 ///
1167 /// assert_eq!(&b2[..], b"hello world");
1168 /// th.join().unwrap();
1169 /// ```
1170 #[inline]
1171 pub fn freeze(self) -> Bytes {
1172 Bytes { inner: self.inner }
1173 }
1174
1175 /// Splits the bytes into two at the given index.
1176 ///
1177 /// Afterwards `self` contains elements `[0, at)`, and the returned
1178 /// `BytesMut` contains elements `[at, capacity)`.
1179 ///
1180 /// This is an `O(1)` operation that just increases the reference count
1181 /// and sets a few indices.
1182 ///
1183 /// # Examples
1184 ///
1185 /// ```
1186 /// use bytes::BytesMut;
1187 ///
1188 /// let mut a = BytesMut::from(&b"hello world"[..]);
1189 /// let mut b = a.split_off(5);
1190 ///
1191 /// a[0] = b'j';
1192 /// b[0] = b'!';
1193 ///
1194 /// assert_eq!(&a[..], b"jello");
1195 /// assert_eq!(&b[..], b"!world");
1196 /// ```
1197 ///
1198 /// # Panics
1199 ///
1200 /// Panics if `at > capacity`.
1201 pub fn split_off(&mut self, at: usize) -> BytesMut {
1202 BytesMut {
1203 inner: self.inner.split_off(at),
1204 auto_expand_size: self.auto_expand_size,
1205 }
1206 }
1207
1208 /// Removes the bytes from the current view, returning them in a new
1209 /// `BytesMut` handle.
1210 ///
1211 /// Afterwards, `self` will be empty, but will retain any additional
1212 /// capacity that it had before the operation. This is identical to
1213 /// `self.split_to(self.len())`.
1214 ///
1215 /// This is an `O(1)` operation that just increases the reference count and
1216 /// sets a few indices.
1217 ///
1218 /// # Examples
1219 ///
1220 /// ```
1221 /// use bytes::{BytesMut, BufMut};
1222 ///
    /// let mut buf = BytesMut::with_capacity(1024, 64);
1224 /// buf.put(&b"hello world"[..]);
1225 ///
1226 /// let other = buf.take();
1227 ///
1228 /// assert!(buf.is_empty());
1229 /// assert_eq!(1013, buf.capacity());
1230 ///
1231 /// assert_eq!(other, b"hello world"[..]);
1232 /// ```
1233 pub fn take(&mut self) -> BytesMut {
1234 let len = self.len();
1235 self.split_to(len)
1236 }
1237
1238 #[deprecated(since = "0.4.1", note = "use take instead")]
1239 #[doc(hidden)]
1240 pub fn drain(&mut self) -> BytesMut {
1241 self.take()
1242 }
1243
1244 /// Splits the buffer into two at the given index.
1245 ///
1246 /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
1247 /// contains elements `[0, at)`.
1248 ///
1249 /// This is an `O(1)` operation that just increases the reference count and
1250 /// sets a few indices.
1251 ///
1252 /// # Examples
1253 ///
1254 /// ```
1255 /// use bytes::BytesMut;
1256 ///
1257 /// let mut a = BytesMut::from(&b"hello world"[..]);
1258 /// let mut b = a.split_to(5);
1259 ///
1260 /// a[0] = b'!';
1261 /// b[0] = b'j';
1262 ///
1263 /// assert_eq!(&a[..], b"!world");
1264 /// assert_eq!(&b[..], b"jello");
1265 /// ```
1266 ///
1267 /// # Panics
1268 ///
1269 /// Panics if `at > len`.
1270 pub fn split_to(&mut self, at: usize) -> BytesMut {
1271 assert!(at <= self.len());
1272
1273 BytesMut {
1274 inner: self.inner.split_to(at),
1275 auto_expand_size: self.auto_expand_size,
1276 }
1277 }
1278
1279 #[deprecated(since = "0.4.1", note = "use split_to instead")]
1280 #[doc(hidden)]
1281 pub fn drain_to(&mut self, at: usize) -> BytesMut {
1282 self.split_to(at)
1283 }
1284
1285 /// Shortens the buffer, keeping the first `len` bytes and dropping the
1286 /// rest.
1287 ///
1288 /// If `len` is greater than the buffer's current length, this has no
1289 /// effect.
1290 ///
1291 /// The [`split_off`] method can emulate `truncate`, but this causes the
1292 /// excess bytes to be returned instead of dropped.
1293 ///
1294 /// # Examples
1295 ///
1296 /// ```
1297 /// use bytes::BytesMut;
1298 ///
1299 /// let mut buf = BytesMut::from(&b"hello world"[..]);
1300 /// buf.truncate(5);
1301 /// assert_eq!(buf, b"hello"[..]);
1302 /// ```
1303 ///
1304 /// [`split_off`]: #method.split_off
1305 pub fn truncate(&mut self, len: usize) {
1306 self.inner.truncate(len);
1307 }
1308
1309 /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
1310 /// rest.
1311 ///
1312 /// This is the same function as `Buf::advance`, and in the next breaking
1313 /// release of `bytes`, this implementation will be removed in favor of
1314 /// having `BytesMut` implement `Buf`.
1315 ///
1316 /// # Panics
1317 ///
1318 /// This function panics if `cnt` is greater than `self.len()`
1319 #[inline]
1320 pub fn advance(&mut self, cnt: usize) {
1321 assert!(cnt <= self.len(), "cannot advance past `remaining`");
1322 unsafe { self.inner.set_start(cnt); }
1323 }
1324
1325 /// Clears the buffer, removing all data.
1326 ///
1327 /// # Examples
1328 ///
1329 /// ```
1330 /// use bytes::BytesMut;
1331 ///
1332 /// let mut buf = BytesMut::from(&b"hello world"[..]);
1333 /// buf.clear();
1334 /// assert!(buf.is_empty());
1335 /// ```
1336 pub fn clear(&mut self) {
1337 self.truncate(0);
1338 }
1339
1340 /// Resizes the buffer so that `len` is equal to `new_len`.
1341 ///
1342 /// If `new_len` is greater than `len`, the buffer is extended by the
1343 /// difference with each additional byte set to `value`. If `new_len` is
1344 /// less than `len`, the buffer is simply truncated.
1345 ///
1346 /// # Examples
1347 ///
1348 /// ```
1349 /// use bytes::BytesMut;
1350 ///
1351 /// let mut buf = BytesMut::new();
1352 ///
1353 /// buf.resize(3, 0x1);
1354 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
1355 ///
1356 /// buf.resize(2, 0x2);
1357 /// assert_eq!(&buf[..], &[0x1, 0x1]);
1358 ///
1359 /// buf.resize(4, 0x3);
1360 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
1361 /// ```
1362 pub fn resize(&mut self, new_len: usize, value: u8) {
1363 self.inner.resize(new_len, value);
1364 }
1365
1366 /// Sets the length of the buffer.
1367 ///
1368 /// This will explicitly set the size of the buffer without actually
1369 /// modifying the data, so it is up to the caller to ensure that the data
1370 /// has been initialized.
1371 ///
1372 /// # Examples
1373 ///
1374 /// ```
1375 /// use bytes::BytesMut;
1376 ///
1377 /// let mut b = BytesMut::from(&b"hello world"[..]);
1378 ///
1379 /// unsafe {
1380 /// b.set_len(5);
1381 /// }
1382 ///
1383 /// assert_eq!(&b[..], b"hello");
1384 ///
1385 /// unsafe {
1386 /// b.set_len(11);
1387 /// }
1388 ///
1389 /// assert_eq!(&b[..], b"hello world");
1390 /// ```
1391 ///
1392 /// # Panics
1393 ///
1394 /// This method will panic if `len` is out of bounds for the underlying
1395 /// slice or if it comes after the `end` of the configured window.
1396 pub unsafe fn set_len(&mut self, len: usize) {
1397 self.inner.set_len(len)
1398 }
1399
1400 /// Reserves capacity for at least `additional` more bytes to be inserted
1401 /// into the given `BytesMut`.
1402 ///
1403 /// More than `additional` bytes may be reserved in order to avoid frequent
1404 /// reallocations. A call to `reserve` may result in an allocation.
1405 ///
1406 /// Before allocating new buffer space, the function will attempt to reclaim
1407 /// space in the existing buffer. If the current handle references a small
1408 /// view in the original buffer and all other handles have been dropped,
1409 /// and the requested capacity is less than or equal to the existing
1410 /// buffer's capacity, then the current view will be copied to the front of
1411 /// the buffer and the handle will take ownership of the full buffer.
1412 ///
1413 /// # Examples
1414 ///
1415 /// In the following example, a new buffer is allocated.
1416 ///
1417 /// ```
1418 /// use bytes::BytesMut;
1419 ///
1420 /// let mut buf = BytesMut::from(&b"hello"[..]);
1421 /// buf.reserve(64);
1422 /// assert!(buf.capacity() >= 69);
1423 /// ```
1424 ///
1425 /// In the following example, the existing buffer is reclaimed.
1426 ///
1427 /// ```
1428 /// use bytes::{BytesMut, BufMut};
1429 ///
    /// let mut buf = BytesMut::with_capacity(128, 64);
1431 /// buf.put(&[0; 64][..]);
1432 ///
1433 /// let ptr = buf.as_ptr();
1434 /// let other = buf.take();
1435 ///
1436 /// assert!(buf.is_empty());
1437 /// assert_eq!(buf.capacity(), 64);
1438 ///
1439 /// drop(other);
1440 /// buf.reserve(128);
1441 ///
1442 /// assert_eq!(buf.capacity(), 128);
1443 /// assert_eq!(buf.as_ptr(), ptr);
1444 /// ```
1445 ///
1446 /// # Panics
1447 ///
1448 /// Panics if the new capacity overflows `usize`.
1449 pub fn reserve(&mut self, additional: usize) {
1450 self.inner.reserve(additional)
1451 }
1452
1453 /// Appends given bytes to this object.
1454 ///
    /// If this `BytesMut` object does not have enough capacity, it is resized
    /// first, so `extend_from_slice` never panics for lack of capacity.
1457 ///
1458 /// # Examples
1459 ///
1460 /// ```
1461 /// use bytes::BytesMut;
1462 ///
    /// let mut buf = BytesMut::with_capacity(0, 64);
1464 /// buf.extend_from_slice(b"aaabbb");
1465 /// buf.extend_from_slice(b"cccddd");
1466 ///
1467 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
1468 /// ```
1469 pub fn extend_from_slice(&mut self, extend: &[u8]) {
1470 self.reserve(extend.len());
1471 self.put_slice(extend);
1472 }
1473
    /// Combines split `BytesMut` objects back into a single contiguous buffer.
    ///
    /// If the two `BytesMut` objects were not originally contiguous, the data
    /// from `other` is copied into `self` instead.
1477 ///
1478 /// # Examples
1479 ///
1480 /// ```
1481 /// use bytes::BytesMut;
1482 ///
    /// let mut buf = BytesMut::with_capacity(64, 64);
1484 /// buf.extend_from_slice(b"aaabbbcccddd");
1485 ///
1486 /// let splitted = buf.split_off(6);
1487 /// assert_eq!(b"aaabbb", &buf[..]);
1488 /// assert_eq!(b"cccddd", &splitted[..]);
1489 ///
1490 /// buf.unsplit(splitted);
1491 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
1492 /// ```
1493 pub fn unsplit(&mut self, other: BytesMut) {
1494 let ptr;
1495
1496 if other.is_empty() {
1497 return;
1498 }
1499
1500 if self.is_empty() {
1501 *self = other;
1502 return;
1503 }
1504
1505 unsafe {
1506 ptr = self.inner.ptr.offset(self.inner.len as isize);
1507 }
1508 if ptr == other.inner.ptr &&
1509 self.inner.kind() == KIND_ARC &&
1510 other.inner.kind() == KIND_ARC
1511 {
1512 debug_assert_eq!(self.inner.arc.load(Acquire),
1513 other.inner.arc.load(Acquire));
1514 // Contiguous blocks, just combine directly
1515 self.inner.len += other.inner.len;
1516 self.inner.cap += other.inner.cap;
1517 }
1518 else {
1519 self.extend_from_slice(&other);
1520 }
1521 }
1522
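    // Ensure there is room for at least `additional` bytes, rounding the
    // request up to `auto_expand_size` so that repeated small writes do not
    // trigger a reallocation every time (auto-expand extension).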
1523 #[inline]
1524 fn auto_expand(&mut self, additional: usize) {
1525 let additional = additional.max(self.auto_expand_size);
1526
1527 if self.remaining_mut() < additional {
1528 self.reserve(additional);
1529 }
1530 }
1531}
1532
1533impl BufMut for BytesMut {
1534 #[inline]
1535 fn remaining_mut(&self) -> usize {
1536 self.capacity() - self.len()
1537 }
1538
1539 #[inline]
1540 unsafe fn advance_mut(&mut self, cnt: usize) {
1541 let new_len = self.len() + cnt;
1542
1543 // This call will panic if `cnt` is too big
1544 self.inner.set_len(new_len);
1545 }
1546
1547 #[inline]
1548 unsafe fn bytes_mut(&mut self) -> &mut [u8] {
1549 let len = self.len();
1550
1551 // This will never panic as `len` can never become invalid
1552 &mut self.inner.as_raw()[len..]
1553 }
1554
    // Based on the default `put` implementation from upstream `bytes`, with
    // auto-expansion added.
1556 fn put<T: IntoBuf>(&mut self, src: T) where Self: Sized {
1557 let mut src = src.into_buf();
1558
1559 // auto expand by marsqing
1560 self.auto_expand(src.remaining());
1561
1562 assert!(self.remaining_mut() >= src.remaining());
1563
1564 while src.has_remaining() {
1565 let l;
1566
1567 unsafe {
1568 let s = src.bytes();
1569 let d = self.bytes_mut();
1570 l = cmp::min(s.len(), d.len());
1571
1572 ptr::copy_nonoverlapping(
1573 s.as_ptr(),
1574 d.as_mut_ptr(),
1575 l);
1576 }
1577
1578 src.advance(l);
1579 unsafe { self.advance_mut(l); }
1580 }
1581 }
1582
1583 #[inline]
1584 fn put_slice(&mut self, src: &[u8]) {
1585 // auto expand by marsqing
1586 self.auto_expand(src.len());
1587
1588 assert!(self.remaining_mut() >= src.len());
1589
1590 let len = src.len();
1591
1592 unsafe {
1593 self.bytes_mut()[..len].copy_from_slice(src);
1594 self.advance_mut(len);
1595 }
1596 }
1597
1598 #[inline]
1599 fn put_u8(&mut self, n: u8) {
1600 // auto expand by marsqing
1601 self.auto_expand(1);
1602
1603 self.inner.put_u8(n);
1604 }
1605
1606 #[inline]
1607 fn put_i8(&mut self, n: i8) {
1608 self.put_u8(n as u8);
1609 }
1610}
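
// A sketch of the auto-expand behaviour added in this fork: writes through
// `BufMut` grow the buffer instead of panicking when the remaining capacity is
// too small.
//
//     let mut buf = BytesMut::with_capacity(0, 64); // starts inline (31 bytes of capacity)
//     buf.put_slice(&[0u8; 40]);                    // upstream `bytes` would panic here
//     assert_eq!(buf.len(), 40);                    // this fork reserved at least 64 more bytes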
1611
1612impl IntoBuf for BytesMut {
1613 type Buf = Cursor<Self>;
1614
1615 fn into_buf(self) -> Self::Buf {
1616 Cursor::new(self)
1617 }
1618}
1619
1620impl<'a> IntoBuf for &'a BytesMut {
1621 type Buf = Cursor<&'a BytesMut>;
1622
1623 fn into_buf(self) -> Self::Buf {
1624 Cursor::new(self)
1625 }
1626}
1627
1628impl AsRef<[u8]> for BytesMut {
1629 #[inline]
1630 fn as_ref(&self) -> &[u8] {
1631 self.inner.as_ref()
1632 }
1633}
1634
1635impl ops::Deref for BytesMut {
1636 type Target = [u8];
1637
1638 #[inline]
1639 fn deref(&self) -> &[u8] {
1640 self.as_ref()
1641 }
1642}
1643
1644impl AsMut<[u8]> for BytesMut {
1645 fn as_mut(&mut self) -> &mut [u8] {
1646 self.inner.as_mut()
1647 }
1648}
1649
1650impl ops::DerefMut for BytesMut {
1651 #[inline]
1652 fn deref_mut(&mut self) -> &mut [u8] {
1653 self.inner.as_mut()
1654 }
1655}
1656
1657impl From<Vec<u8>> for BytesMut {
1658 fn from(src: Vec<u8>) -> BytesMut {
1659 BytesMut {
1660 inner: Inner::from_vec(src),
1661 auto_expand_size: crate::DEFAULT_AUTO_EXPAND_SIZE,
1662 }
1663 }
1664}
1665
1666impl From<String> for BytesMut {
1667 fn from(src: String) -> BytesMut {
1668 BytesMut::from(src.into_bytes())
1669 }
1670}
1671
1672impl<'a> From<&'a [u8]> for BytesMut {
1673 fn from(src: &'a [u8]) -> BytesMut {
1674 let len = src.len();
1675
1676 if len == 0 {
1677 BytesMut::new()
1678 } else if len <= INLINE_CAP {
1679 unsafe {
1680 let mut inner: Inner = mem::uninitialized();
1681
1682 // Set inline mask
1683 inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
1684 inner.set_inline_len(len);
1685 inner.as_raw()[0..len].copy_from_slice(src);
1686
1687 BytesMut {
1688 inner: inner,
1689 auto_expand_size: crate::DEFAULT_AUTO_EXPAND_SIZE,
1690 }
1691 }
1692 } else {
1693 BytesMut::from(src.to_vec())
1694 }
1695 }
1696}
1697
1698impl<'a> From<&'a str> for BytesMut {
1699 fn from(src: &'a str) -> BytesMut {
1700 BytesMut::from(src.as_bytes())
1701 }
1702}
1703
1704impl From<Bytes> for BytesMut {
1705 fn from(src: Bytes) -> BytesMut {
1706 src.try_mut()
1707 .unwrap_or_else(|src| BytesMut::from(&src[..]))
1708 }
1709}
1710
1711impl PartialEq for BytesMut {
1712 fn eq(&self, other: &BytesMut) -> bool {
1713 self.inner.as_ref() == other.inner.as_ref()
1714 }
1715}
1716
1717impl PartialOrd for BytesMut {
1718 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1719 self.inner.as_ref().partial_cmp(other.inner.as_ref())
1720 }
1721}
1722
1723impl Ord for BytesMut {
1724 fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1725 self.inner.as_ref().cmp(other.inner.as_ref())
1726 }
1727}
1728
1729impl Eq for BytesMut {
1730}
1731
1732impl Default for BytesMut {
1733 #[inline]
1734 fn default() -> BytesMut {
1735 BytesMut::new()
1736 }
1737}
1738
1739impl fmt::Debug for BytesMut {
1740 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1741 fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
1742 }
1743}
1744
1745impl hash::Hash for BytesMut {
1746 fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
1747 let s: &[u8] = self.as_ref();
1748 s.hash(state);
1749 }
1750}
1751
1752impl Borrow<[u8]> for BytesMut {
1753 fn borrow(&self) -> &[u8] {
1754 self.as_ref()
1755 }
1756}
1757
1758impl BorrowMut<[u8]> for BytesMut {
1759 fn borrow_mut(&mut self) -> &mut [u8] {
1760 self.as_mut()
1761 }
1762}
1763
1764impl fmt::Write for BytesMut {
1765 #[inline]
1766 fn write_str(&mut self, s: &str) -> fmt::Result {
1767 if self.remaining_mut() >= s.len() {
1768 self.put_slice(s.as_bytes());
1769 Ok(())
1770 } else {
1771 Err(fmt::Error)
1772 }
1773 }
1774
1775 #[inline]
1776 fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
1777 fmt::write(self, args)
1778 }
1779}
1780
1781impl Clone for BytesMut {
1782 fn clone(&self) -> BytesMut {
1783 BytesMut::from(&self[..])
1784 }
1785}
1786
1787impl IntoIterator for BytesMut {
1788 type Item = u8;
1789 type IntoIter = Iter<Cursor<BytesMut>>;
1790
1791 fn into_iter(self) -> Self::IntoIter {
1792 self.into_buf().iter()
1793 }
1794}
1795
1796impl<'a> IntoIterator for &'a BytesMut {
1797 type Item = u8;
1798 type IntoIter = Iter<Cursor<&'a BytesMut>>;
1799
1800 fn into_iter(self) -> Self::IntoIter {
1801 self.into_buf().iter()
1802 }
1803}
1804
1805impl Extend<u8> for BytesMut {
1806 fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
1807 let iter = iter.into_iter();
1808
1809 let (lower, _) = iter.size_hint();
1810 self.reserve(lower);
1811
1812 for b in iter {
1813 unsafe {
1814 self.bytes_mut()[0] = b;
1815 self.advance_mut(1);
1816 }
1817 }
1818 }
1819}
1820
1821impl<'a> Extend<&'a u8> for BytesMut {
1822 fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
1823 self.extend(iter.into_iter().map(|b| *b))
1824 }
1825}
1826
1827/*
1828 *
1829 * ===== Inner =====
1830 *
1831 */
1832
1833impl Inner {
1834 #[inline]
1835 fn from_static(bytes: &'static [u8]) -> Inner {
1836 let ptr = bytes.as_ptr() as *mut u8;
1837
1838 Inner {
1839 // `arc` won't ever store a pointer. Instead, use it to
1840 // track the fact that the `Bytes` handle is backed by a
1841 // static buffer.
1842 arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
1843 ptr: ptr,
1844 len: bytes.len(),
1845 cap: bytes.len(),
1846 }
1847 }
1848
1849 #[inline]
1850 fn from_vec(mut src: Vec<u8>) -> Inner {
1851 let len = src.len();
1852 let cap = src.capacity();
1853 let ptr = src.as_mut_ptr();
1854
1855 mem::forget(src);
1856
1857 let original_capacity_repr = original_capacity_to_repr(cap);
1858 let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
1859
1860 Inner {
1861 arc: AtomicPtr::new(arc as *mut Shared),
1862 ptr: ptr,
1863 len: len,
1864 cap: cap,
1865 }
1866 }
1867
1868 #[inline]
1869 fn with_capacity(capacity: usize) -> Inner {
1870 if capacity <= INLINE_CAP {
1871 unsafe {
1872 // Using uninitialized memory is ~30% faster
1873 let mut inner: Inner = mem::uninitialized();
1874 inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
1875 inner
1876 }
1877 } else {
1878 Inner::from_vec(Vec::with_capacity(capacity))
1879 }
1880 }
1881
1882 /// Return a slice for the handle's view into the shared buffer
1883 #[inline]
1884 fn as_ref(&self) -> &[u8] {
1885 unsafe {
1886 if self.is_inline() {
1887 slice::from_raw_parts(self.inline_ptr(), self.inline_len())
1888 } else {
1889 slice::from_raw_parts(self.ptr, self.len)
1890 }
1891 }
1892 }
1893
1894 /// Return a mutable slice for the handle's view into the shared buffer
1895 #[inline]
1896 fn as_mut(&mut self) -> &mut [u8] {
1897 debug_assert!(!self.is_static());
1898
1899 unsafe {
1900 if self.is_inline() {
1901 slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
1902 } else {
1903 slice::from_raw_parts_mut(self.ptr, self.len)
1904 }
1905 }
1906 }
1907
1908 /// Return a mutable slice for the handle's view into the shared buffer
1909 /// including potentially uninitialized bytes.
1910 #[inline]
1911 unsafe fn as_raw(&mut self) -> &mut [u8] {
1912 debug_assert!(!self.is_static());
1913
1914 if self.is_inline() {
1915 slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
1916 } else {
1917 slice::from_raw_parts_mut(self.ptr, self.cap)
1918 }
1919 }
1920
1921 /// Insert a byte into the next slot and advance the len by 1.
1922 #[inline]
1923 fn put_u8(&mut self, n: u8) {
1924 if self.is_inline() {
1925 let len = self.inline_len();
1926 assert!(len < INLINE_CAP);
1927 unsafe {
1928 *self.inline_ptr().offset(len as isize) = n;
1929 }
1930 self.set_inline_len(len + 1);
1931 } else {
1932 assert!(self.len < self.cap);
1933 unsafe {
1934 *self.ptr.offset(self.len as isize) = n;
1935 }
1936 self.len += 1;
1937 }
1938 }
1939
1940 #[inline]
1941 fn len(&self) -> usize {
1942 if self.is_inline() {
1943 self.inline_len()
1944 } else {
1945 self.len
1946 }
1947 }
1948
1949 /// Pointer to the start of the inline buffer
1950 #[inline]
1951 unsafe fn inline_ptr(&self) -> *mut u8 {
1952 (self as *const Inner as *mut Inner as *mut u8)
1953 .offset(INLINE_DATA_OFFSET)
1954 }
1955
1956 #[inline]
1957 fn inline_len(&self) -> usize {
1958 let p: &usize = unsafe { mem::transmute(&self.arc) };
1959 (p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
1960 }
1961
1962 /// Set the length of the inline buffer. This is done by writing to the
1963 /// least significant byte of the `arc` field.
1964 #[inline]
1965 fn set_inline_len(&mut self, len: usize) {
1966 debug_assert!(len <= INLINE_CAP);
1967 let p = self.arc.get_mut();
1968 *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
1969 }
1970
    /// Set the length of the handle's view into the underlying buffer.
1972 #[inline]
1973 unsafe fn set_len(&mut self, len: usize) {
1974 if self.is_inline() {
1975 assert!(len <= INLINE_CAP);
1976 self.set_inline_len(len);
1977 } else {
1978 assert!(len <= self.cap);
1979 self.len = len;
1980 }
1981 }
1982
1983 #[inline]
1984 fn is_empty(&self) -> bool {
1985 self.len() == 0
1986 }
1987
1988 #[inline]
1989 fn capacity(&self) -> usize {
1990 if self.is_inline() {
1991 INLINE_CAP
1992 } else {
1993 self.cap
1994 }
1995 }
1996
1997 fn split_off(&mut self, at: usize) -> Inner {
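    /// Split the view at `at`, returning a new handle for the `[at, len)`
    /// tail while `self` keeps the `[0, at)` head. Both handles share the
    /// underlying storage via `shallow_clone`.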
1998 let mut other = unsafe { self.shallow_clone(true) };
1999
2000 unsafe {
2001 other.set_start(at);
2002 self.set_end(at);
2003 }
2004
2005         other
2006 }
2007
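    /// Split the view at `at`, returning a new handle for the `[0, at)` head
    /// while `self` keeps the `[at, len)` tail. The mirror image of
    /// `split_off`.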
2008 fn split_to(&mut self, at: usize) -> Inner {
2009 let mut other = unsafe { self.shallow_clone(true) };
2010
2011 unsafe {
2012 other.set_end(at);
2013 self.set_start(at);
2014 }
2015
2016         other
2017 }
2018
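    /// Shorten the view to `len` bytes. Requests longer than the current
    /// length are ignored.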
2019 fn truncate(&mut self, len: usize) {
2020 if len <= self.len() {
2021 unsafe { self.set_len(len); }
2022 }
2023 }
2024
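    /// Resize the view to `new_len` bytes, filling any newly visible bytes
    /// with `value`. Growing reserves additional capacity first; shrinking
    /// simply truncates.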
2025 fn resize(&mut self, new_len: usize, value: u8) {
2026 let len = self.len();
2027 if new_len > len {
2028 let additional = new_len - len;
2029 self.reserve(additional);
2030 unsafe {
2031 let dst = self.as_raw()[len..].as_mut_ptr();
2032 ptr::write_bytes(dst, value, additional);
2033 self.set_len(new_len);
2034 }
2035 } else {
2036 self.truncate(new_len);
2037 }
2038 }
2039
2040 unsafe fn set_start(&mut self, start: usize) {
2041 // Setting the start to 0 is a no-op, so return early if this is the
2042 // case.
2043 if start == 0 {
2044 return;
2045 }
2046
2047 let kind = self.kind();
2048
2049 // Always check `inline` first, because if the handle is using inline
2050 // data storage, all of the `Inner` struct fields will be gibberish.
2051 if kind == KIND_INLINE {
2052 assert!(start <= INLINE_CAP);
2053
2054 let len = self.inline_len();
2055
2056 if len <= start {
2057 self.set_inline_len(0);
2058 } else {
2059 // `set_start` is essentially shifting data off the front of the
2060 // view. Inlined buffers only track the length of the slice.
2061 // So, to update the start, the data at the new starting point
2062 // is copied to the beginning of the buffer.
2063 let new_len = len - start;
2064
2065 let dst = self.inline_ptr();
2066 let src = (dst as *const u8).offset(start as isize);
2067
2068 ptr::copy(src, dst, new_len);
2069
2070 self.set_inline_len(new_len);
2071 }
2072 } else {
2073 assert!(start <= self.cap);
2074
2075 if kind == KIND_VEC {
2076                 // Setting the start when in vec representation is a little more
2077                 // complicated. First, we have to track how far ahead the
2078                 // "start" of the byte buffer is from the beginning of the vec.
2079                 // We also have to ensure that we don't exceed the maximum shift.
2080 let (mut pos, prev) = self.uncoordinated_get_vec_pos();
2081 pos += start;
2082
2083 if pos <= MAX_VEC_POS {
2084 self.uncoordinated_set_vec_pos(pos, prev);
2085 } else {
2086 // The repr must be upgraded to ARC. This will never happen
2087 // on 64 bit systems and will only happen on 32 bit systems
2088 // when shifting past 134,217,727 bytes. As such, we don't
2089 // worry too much about performance here.
2090 let _ = self.shallow_clone(true);
2091 }
2092 }
2093
2094 // Updating the start of the view is setting `ptr` to point to the
2095 // new start and updating the `len` field to reflect the new length
2096 // of the view.
2097 self.ptr = self.ptr.offset(start as isize);
2098
2099 if self.len >= start {
2100 self.len -= start;
2101 } else {
2102 self.len = 0;
2103 }
2104
2105 self.cap -= start;
2106 }
2107 }
2108
2109 unsafe fn set_end(&mut self, end: usize) {
2110 debug_assert!(self.is_shared());
2111
2112 // Always check `inline` first, because if the handle is using inline
2113 // data storage, all of the `Inner` struct fields will be gibberish.
2114 if self.is_inline() {
2115 assert!(end <= INLINE_CAP);
2116 let new_len = cmp::min(self.inline_len(), end);
2117 self.set_inline_len(new_len);
2118 } else {
2119 assert!(end <= self.cap);
2120
2121 self.cap = end;
2122 self.len = cmp::min(self.len, end);
2123 }
2124 }
2125
2126 /// Checks if it is safe to mutate the memory
2127 fn is_mut_safe(&mut self) -> bool {
2128 let kind = self.kind();
2129
2130 // Always check `inline` first, because if the handle is using inline
2131 // data storage, all of the `Inner` struct fields will be gibberish.
2132 if kind == KIND_INLINE {
2133 // Inlined buffers can always be mutated as the data is never shared
2134 // across handles.
2135 true
2136 } else if kind == KIND_VEC {
2137 true
2138 } else if kind == KIND_STATIC {
2139 false
2140 } else {
2141 // Otherwise, the underlying buffer is potentially shared with other
2142 // handles, so the ref_count needs to be checked.
2143 unsafe { (**self.arc.get_mut()).is_unique() }
2144 }
2145 }
2146
2147     /// Increments the ref count. This should only be done if it is known that
2148     /// it can be done safely. As such, this fn is not public; instead, other
2149     /// fns use it while maintaining the guarantees.
2150     /// The `mut_self` parameter should only be set to `true` if the caller
2151     /// holds a `&mut self` reference.
2152 ///
2153 /// "Safely" is defined as not exposing two `BytesMut` values that point to
2154 /// the same byte window.
2155 ///
2156 /// This function is thread safe.
2157 unsafe fn shallow_clone(&self, mut_self: bool) -> Inner {
2158 // Always check `inline` first, because if the handle is using inline
2159 // data storage, all of the `Inner` struct fields will be gibberish.
2160 //
2161 // Additionally, if kind is STATIC, then Arc is *never* changed, making
2162 // it safe and faster to check for it now before an atomic acquire.
2163
2164 if self.is_inline_or_static() {
2165 // In this case, a shallow_clone still involves copying the data.
2166 let mut inner: Inner = mem::uninitialized();
2167 ptr::copy_nonoverlapping(
2168 self,
2169 &mut inner,
2170 1,
2171 );
2172 inner
2173 } else {
2174 self.shallow_clone_sync(mut_self)
2175 }
2176 }
2177
2179 #[cold]
2180 unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner {
2181 // The function requires `&self`, this means that `shallow_clone`
2182 // could be called concurrently.
2183 //
2184 // The first step is to load the value of `arc`. This will determine
2185 // how to proceed. The `Acquire` ordering synchronizes with the
2186 // `compare_and_swap` that comes later in this function. The goal is
2187 // to ensure that if `arc` is currently set to point to a `Shared`,
2188 // that the current thread acquires the associated memory.
2189 let arc = self.arc.load(Acquire);
2190 let kind = arc as usize & KIND_MASK;
2191
2192 if kind == KIND_ARC {
2193 self.shallow_clone_arc(arc)
2194 } else {
2195 assert!(kind == KIND_VEC);
2196 self.shallow_clone_vec(arc as usize, mut_self)
2197 }
2198 }
2199
2200 unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner {
2201 debug_assert!(arc as usize & KIND_MASK == KIND_ARC);
2202
2203 let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
2204
2205 if old_size == usize::MAX {
2206 abort();
2207 }
2208
2209 Inner {
2210 arc: AtomicPtr::new(arc),
2211 .. *self
2212 }
2213 }
2214
2215 #[cold]
2216 unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner {
2217         // If the buffer is still tracked in a `Vec<u8>`, it is time to
2218 // promote the vec to an `Arc`. This could potentially be called
2219 // concurrently, so some care must be taken.
2220
2221 debug_assert!(arc & KIND_MASK == KIND_VEC);
2222
2223 let original_capacity_repr =
2224 (arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
2225
2226 // The vec offset cannot be concurrently mutated, so there
2227         // should be no danger in reading it.
2228 let off = (arc as usize) >> VEC_POS_OFFSET;
2229
2230 // First, allocate a new `Shared` instance containing the
2231 // `Vec` fields. It's important to note that `ptr`, `len`,
2232 // and `cap` cannot be mutated without having `&mut self`.
2233 // This means that these fields will not be concurrently
2234 // updated and since the buffer hasn't been promoted to an
2235 // `Arc`, those three fields still are the components of the
2236 // vector.
2237 let shared = Box::new(Shared {
2238 vec: rebuild_vec(self.ptr, self.len, self.cap, off),
2239 original_capacity_repr: original_capacity_repr,
2240 // Initialize refcount to 2. One for this reference, and one
2241 // for the new clone that will be returned from
2242 // `shallow_clone`.
2243 ref_count: AtomicUsize::new(2),
2244 });
2245
2246 let shared = Box::into_raw(shared);
2247
2248 // The pointer should be aligned, so this assert should
2249 // always succeed.
2250 debug_assert!(0 == (shared as usize & 0b11));
2251
2252 // If there are no references to self in other threads,
2253 // expensive atomic operations can be avoided.
2254 if mut_self {
2255 self.arc.store(shared, Relaxed);
2256 return Inner {
2257 arc: AtomicPtr::new(shared),
2258 .. *self
2259 };
2260 }
2261
2262 // Try compare & swapping the pointer into the `arc` field.
2263         // `Release` is used to synchronize with other threads that
2264 // will load the `arc` field.
2265 //
2266 // If the `compare_and_swap` fails, then the thread lost the
2267 // race to promote the buffer to shared. The `Acquire`
2268 // ordering will synchronize with the `compare_and_swap`
2269 // that happened in the other thread and the `Shared`
2270 // pointed to by `actual` will be visible.
2271 let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel);
2272
2273 if actual as usize == arc {
2274 // The upgrade was successful, the new handle can be
2275 // returned.
2276 return Inner {
2277 arc: AtomicPtr::new(shared),
2278 .. *self
2279 };
2280 }
2281
2282 // The upgrade failed, a concurrent clone happened. Release
2283 // the allocation that was made in this thread, it will not
2284 // be needed.
2285 let shared = Box::from_raw(shared);
2286 mem::forget(*shared);
2287
2288 // Buffer already promoted to shared storage, so increment ref
2289 // count.
2290 self.shallow_clone_arc(actual)
2291 }
2292
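    /// Ensure that the handle can hold at least `additional` more bytes.
    /// Depending on the representation this promotes an inline buffer to a
    /// `Vec`, shifts or grows an existing `Vec`, or, for an `Arc`-backed
    /// buffer, reclaims it when unique or replaces it with a new allocation.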
2293 #[inline]
2294 fn reserve(&mut self, additional: usize) {
2295 let len = self.len();
2296 let rem = self.capacity() - len;
2297
2298 if additional <= rem {
2299 // The handle can already store at least `additional` more bytes, so
2300 // there is no further work needed to be done.
2301 return;
2302 }
2303
2304 let kind = self.kind();
2305
2306 // Always check `inline` first, because if the handle is using inline
2307 // data storage, all of the `Inner` struct fields will be gibberish.
2308 if kind == KIND_INLINE {
2309 let new_cap = len + additional;
2310
2311 // Promote to a vector
2312 let mut v = Vec::with_capacity(new_cap);
2313 v.extend_from_slice(self.as_ref());
2314
2315 self.ptr = v.as_mut_ptr();
2316 self.len = v.len();
2317 self.cap = v.capacity();
2318
2319             // The original capacity was at most `INLINE_CAP`, so don't bother
2320             // encoding it; leave the original capacity bits as zero.
2321 self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);
2322
2323 mem::forget(v);
2324 return;
2325 }
2326
2327 if kind == KIND_VEC {
2328 // If there's enough free space before the start of the buffer, then
2329 // just copy the data backwards and reuse the already-allocated
2330 // space.
2331 //
2332 // Otherwise, since backed by a vector, use `Vec::reserve`
2333 unsafe {
2334 let (off, prev) = self.uncoordinated_get_vec_pos();
2335
2336 // Only reuse space if we stand to gain at least capacity/2
2337 // bytes of space back
2338 if off >= additional && off >= (self.cap / 2) {
2339 // There's space - reuse it
2340 //
2341 // Just move the pointer back to the start after copying
2342 // data back.
2343 let base_ptr = self.ptr.offset(-(off as isize));
2344 ptr::copy(self.ptr, base_ptr, self.len);
2345 self.ptr = base_ptr;
2346 self.uncoordinated_set_vec_pos(0, prev);
2347
2348 // Length stays constant, but since we moved backwards we
2349 // can gain capacity back.
2350 self.cap += off;
2351 } else {
2352 // No space - allocate more
2353 let mut v = rebuild_vec(self.ptr, self.len, self.cap, off);
2354 v.reserve(additional);
2355
2356 // Update the info
2357 self.ptr = v.as_mut_ptr().offset(off as isize);
2358 self.len = v.len() - off;
2359 self.cap = v.capacity() - off;
2360
2361 // Drop the vec reference
2362 mem::forget(v);
2363 }
2364 return;
2365 }
2366 }
2367
2368 let arc = *self.arc.get_mut();
2369
2370 debug_assert!(kind == KIND_ARC);
2371
2372 // Reserving involves abandoning the currently shared buffer and
2373 // allocating a new vector with the requested capacity.
2374 //
2375 // Compute the new capacity
2376 let mut new_cap = len + additional;
2377 let original_capacity;
2378 let original_capacity_repr;
2379
2380 unsafe {
2381 original_capacity_repr = (*arc).original_capacity_repr;
2382 original_capacity = original_capacity_from_repr(original_capacity_repr);
2383
2384 // First, try to reclaim the buffer. This is possible if the current
2385 // handle is the only outstanding handle pointing to the buffer.
2386 if (*arc).is_unique() {
2387 // This is the only handle to the buffer. It can be reclaimed.
2388 // However, before doing the work of copying data, check to make
2389 // sure that the vector has enough capacity.
2390 let v = &mut (*arc).vec;
2391
2392 if v.capacity() >= new_cap {
2393 // The capacity is sufficient, reclaim the buffer
2394 let ptr = v.as_mut_ptr();
2395
2396 ptr::copy(self.ptr, ptr, len);
2397
2398 self.ptr = ptr;
2399 self.cap = v.capacity();
2400
2401 return;
2402 }
2403
2404 // The vector capacity is not sufficient. The reserve request is
2405 // asking for more than the initial buffer capacity. Allocate more
2406 // than requested if `new_cap` is not much bigger than the current
2407 // capacity.
2408 //
2409             // In some situations (e.g. when `reserve_exact` was used), the
2410             // buffer capacity could be below `original_capacity`, so check
2411             // for that here.
2412 new_cap = cmp::max(
2413 cmp::max(v.capacity() << 1, new_cap),
2414 original_capacity);
2415 } else {
2416 new_cap = cmp::max(new_cap, original_capacity);
2417 }
2418 }
2419
2420 // Create a new vector to store the data
2421 let mut v = Vec::with_capacity(new_cap);
2422
2423 // Copy the bytes
2424 v.extend_from_slice(self.as_ref());
2425
2426 // Release the shared handle. This must be done *after* the bytes are
2427 // copied.
2428 release_shared(arc);
2429
2430 // Update self
2431 self.ptr = v.as_mut_ptr();
2432 self.len = v.len();
2433 self.cap = v.capacity();
2434
2435 let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
2436
2437 self.arc = AtomicPtr::new(arc as *mut Shared);
2438
2439 // Forget the vector handle
2440 mem::forget(v);
2441 }
2442
2443 /// Returns true if the buffer is stored inline
2444 #[inline]
2445 fn is_inline(&self) -> bool {
2446 self.kind() == KIND_INLINE
2447 }
2448
2449 #[inline]
2450 fn is_inline_or_static(&self) -> bool {
2451         // The value returned by `kind` is not loaded atomically, so it can only
2452         // inform which operations to take when the relevant bits are never
2453         // modified without synchronization.
2454         //
2455         // KIND_INLINE and KIND_STATIC will *never* change, so branching on that
2456         // information is safe.
2457 let kind = self.kind();
2458 kind == KIND_INLINE || kind == KIND_STATIC
2459 }
2460
2461     /// Used for `debug_assert` statements. `&mut` is used to guarantee that it
2462     /// is safe to check `KIND_VEC`.
2463 #[inline]
2464 fn is_shared(&mut self) -> bool {
2465 match self.kind() {
2466 KIND_VEC => false,
2467 _ => true,
2468 }
2469 }
2470
2471 /// Used for `debug_assert` statements
2472 #[inline]
2473 fn is_static(&mut self) -> bool {
2474 match self.kind() {
2475 KIND_STATIC => true,
2476 _ => false,
2477 }
2478 }
2479
2480 #[inline]
2481 fn kind(&self) -> usize {
2482         // This function is going to probably raise some eyebrows. The function
2483         // returns which representation the buffer uses. This is done by checking
2484         // the two least significant bits of the `arc` field.
2485 //
2486 // Now, you may notice that `arc` is an `AtomicPtr` and this is
2487 // accessing it as a normal field without performing an atomic load...
2488 //
2489         // Again, the function only cares about the two least significant bits,
2490         // which are set when `Inner` is created and, aside from the vec-to-arc
2491         // promotion, are not changed after that. All platforms have atomic "word"
2492         // operations and won't randomly flip bits, so even without any explicit
2493         // atomic operations, reading the flag will be correct.
2494 //
2495         // This is undefined behavior due to a data race, but experimental
2496 // evidence shows that it works in practice (discussion:
2497 // https://internals.rust-lang.org/t/bit-wise-reasoning-for-atomic-accesses/8853).
2498 //
2499 // This function is very critical performance wise as it is called for
2500 // every operation. Performing an atomic load would mess with the
2501 // compiler's ability to optimize. Simple benchmarks show up to a 10%
2502 // slowdown using a `Relaxed` atomic load on x86.
2503
2504 #[cfg(target_endian = "little")]
2505 #[inline]
2506 fn imp(arc: &AtomicPtr<Shared>) -> usize {
2507 unsafe {
2508 let p: *const u8 = mem::transmute(arc);
2509 (*p as usize) & KIND_MASK
2510 }
2511 }
2512
2513 #[cfg(target_endian = "big")]
2514 #[inline]
2515 fn imp(arc: &AtomicPtr<Shared>) -> usize {
2516 unsafe {
2517 let p: *const usize = mem::transmute(arc);
2518 *p & KIND_MASK
2519 }
2520 }
2521
2522 imp(&self.arc)
2523 }
2524
2525 #[inline]
2526 fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) {
2527 // Similar to above, this is a pretty crazed function. This should only
2528 // be called when in the KIND_VEC mode. This + the &mut self argument
2529 // guarantees that there is no possibility of concurrent calls to this
2530 // function.
2531 let prev = unsafe {
2532 let p: &AtomicPtr<Shared> = &self.arc;
2533 let p: *const usize = mem::transmute(p);
2534 *p
2535 };
2536
2537 (prev >> VEC_POS_OFFSET, prev)
2538 }
2539
2540 #[inline]
2541 fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) {
2542 // Once more... crazy
2543 debug_assert!(pos <= MAX_VEC_POS);
2544
2545 unsafe {
2546 let p: &mut AtomicPtr<Shared> = &mut self.arc;
2547 let p: &mut usize = mem::transmute(p);
2548 *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
2549 }
2550 }
2551}
2552
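/// Reassemble the `Vec<u8>` that backs a `KIND_VEC` handle by undoing the
/// stored front offset: walk `ptr` back by `off` bytes and grow `len` and
/// `cap` accordingly before calling `Vec::from_raw_parts`.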
2553fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
2554 unsafe {
2555 let ptr = ptr.offset(-(off as isize));
2556 len += off;
2557 cap += off;
2558
2559 Vec::from_raw_parts(ptr, len, cap)
2560 }
2561}
2562
2563impl Drop for Inner {
2564 fn drop(&mut self) {
2565 let kind = self.kind();
2566
2567 if kind == KIND_VEC {
2568 let (off, _) = self.uncoordinated_get_vec_pos();
2569
2570 // Vector storage, free the vector
2571 let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
2572 } else if kind == KIND_ARC {
2573 release_shared(*self.arc.get_mut());
2574 }
2575 }
2576}
2577
2578fn release_shared(ptr: *mut Shared) {
2579 // `Shared` storage... follow the drop steps from Arc.
2580 unsafe {
2581 if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
2582 return;
2583 }
2584
2585 // This fence is needed to prevent reordering of use of the data and
2586 // deletion of the data. Because it is marked `Release`, the decreasing
2587 // of the reference count synchronizes with this `Acquire` fence. This
2588 // means that use of the data happens before decreasing the reference
2589 // count, which happens before this fence, which happens before the
2590 // deletion of the data.
2591 //
2592 // As explained in the [Boost documentation][1],
2593 //
2594 // > It is important to enforce any possible access to the object in one
2595 // > thread (through an existing reference) to *happen before* deleting
2596 // > the object in a different thread. This is achieved by a "release"
2597 // > operation after dropping a reference (any access to the object
2598 // > through this reference must obviously happened before), and an
2599 // > "acquire" operation before deleting the object.
2600 //
2601 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2602 atomic::fence(Acquire);
2603
2604 // Drop the data
2605 Box::from_raw(ptr);
2606 }
2607}
2608
2609impl Shared {
2610 fn is_unique(&self) -> bool {
2611 // The goal is to check if the current handle is the only handle
2612 // that currently has access to the buffer. This is done by
2613 // checking if the `ref_count` is currently 1.
2614 //
2615 // The `Acquire` ordering synchronizes with the `Release` as
2616 // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
2617 // operation guarantees that any mutations done in other threads
2618 // are ordered before the `ref_count` is decremented. As such,
2619 // this `Acquire` will guarantee that those mutations are
2620 // visible to the current thread.
2621 self.ref_count.load(Acquire) == 1
2622 }
2623}
2624
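// Map an original buffer capacity to the small integer representation stored
// in the `arc` tag bits: roughly the power-of-two bucket above
// `MIN_ORIGINAL_CAPACITY_WIDTH`, clamped to the largest encodable bucket.
// `original_capacity_from_repr` performs the (lossy) inverse mapping.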
2625fn original_capacity_to_repr(cap: usize) -> usize {
2626 let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
2627 cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
2628}
2629
2630fn original_capacity_from_repr(repr: usize) -> usize {
2631 if repr == 0 {
2632 return 0;
2633 }
2634
2635 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
2636}
2637
2638#[test]
2639fn test_original_capacity_to_repr() {
2640 assert_eq!(original_capacity_to_repr(0), 0);
2641
2642 let max_width = 32;
2643
2644 for width in 1..(max_width + 1) {
2645         let cap = 1 << (width - 1);
2646
2647 let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
2648 0
2649 } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
2650 width - MIN_ORIGINAL_CAPACITY_WIDTH
2651 } else {
2652 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
2653 };
2654
2655 assert_eq!(original_capacity_to_repr(cap), expected);
2656
2657 if width > 1 {
2658 assert_eq!(original_capacity_to_repr(cap + 1), expected);
2659 }
2660
2661 // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
2662 if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
2663 assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
2664 assert_eq!(original_capacity_to_repr(cap + 76), expected);
2665 } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
2666 assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
2667 assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
2668 }
2669 }
2670}
2671
2672#[test]
2673fn test_original_capacity_from_repr() {
2674 assert_eq!(0, original_capacity_from_repr(0));
2675
2676 let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
2677
2678 assert_eq!(min_cap, original_capacity_from_repr(1));
2679 assert_eq!(min_cap * 2, original_capacity_from_repr(2));
2680 assert_eq!(min_cap * 4, original_capacity_from_repr(3));
2681 assert_eq!(min_cap * 8, original_capacity_from_repr(4));
2682 assert_eq!(min_cap * 16, original_capacity_from_repr(5));
2683 assert_eq!(min_cap * 32, original_capacity_from_repr(6));
2684 assert_eq!(min_cap * 64, original_capacity_from_repr(7));
2685}
2686
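// A small round-trip sketch (not part of the original test suite): it assumes
// only what the two functions above already encode, namely that every repr in
// the encodable range expands to a power-of-two capacity and maps back to the
// same repr.
#[test]
fn test_original_capacity_repr_roundtrip() {
    let max_repr = MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH;

    for repr in 1..(max_repr + 1) {
        let cap = original_capacity_from_repr(repr);

        // `from_repr` expands a repr to its representative capacity...
        assert_eq!(cap, 1 << (repr + MIN_ORIGINAL_CAPACITY_WIDTH - 1));

        // ...and `to_repr` maps that capacity back to the same repr.
        assert_eq!(original_capacity_to_repr(cap), repr);
    }
}
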
2687unsafe impl Send for Inner {}
2688unsafe impl Sync for Inner {}
2689
2690/*
2691 *
2692 * ===== PartialEq / PartialOrd =====
2693 *
2694 */
2695
2696impl PartialEq<[u8]> for BytesMut {
2697 fn eq(&self, other: &[u8]) -> bool {
2698 &**self == other
2699 }
2700}
2701
2702impl PartialOrd<[u8]> for BytesMut {
2703 fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
2704 (**self).partial_cmp(other)
2705 }
2706}
2707
2708impl PartialEq<BytesMut> for [u8] {
2709 fn eq(&self, other: &BytesMut) -> bool {
2710 *other == *self
2711 }
2712}
2713
2714impl PartialOrd<BytesMut> for [u8] {
2715 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2716 other.partial_cmp(self)
2717 }
2718}
2719
2720impl PartialEq<str> for BytesMut {
2721 fn eq(&self, other: &str) -> bool {
2722 &**self == other.as_bytes()
2723 }
2724}
2725
2726impl PartialOrd<str> for BytesMut {
2727 fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
2728 (**self).partial_cmp(other.as_bytes())
2729 }
2730}
2731
2732impl PartialEq<BytesMut> for str {
2733 fn eq(&self, other: &BytesMut) -> bool {
2734 *other == *self
2735 }
2736}
2737
2738impl PartialOrd<BytesMut> for str {
2739 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2740 other.partial_cmp(self)
2741 }
2742}
2743
2744impl PartialEq<Vec<u8>> for BytesMut {
2745 fn eq(&self, other: &Vec<u8>) -> bool {
2746 *self == &other[..]
2747 }
2748}
2749
2750impl PartialOrd<Vec<u8>> for BytesMut {
2751 fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
2752 (**self).partial_cmp(&other[..])
2753 }
2754}
2755
2756impl PartialEq<BytesMut> for Vec<u8> {
2757 fn eq(&self, other: &BytesMut) -> bool {
2758 *other == *self
2759 }
2760}
2761
2762impl PartialOrd<BytesMut> for Vec<u8> {
2763 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2764 other.partial_cmp(self)
2765 }
2766}
2767
2768impl PartialEq<String> for BytesMut {
2769 fn eq(&self, other: &String) -> bool {
2770 *self == &other[..]
2771 }
2772}
2773
2774impl PartialOrd<String> for BytesMut {
2775 fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
2776 (**self).partial_cmp(other.as_bytes())
2777 }
2778}
2779
2780impl PartialEq<BytesMut> for String {
2781 fn eq(&self, other: &BytesMut) -> bool {
2782 *other == *self
2783 }
2784}
2785
2786impl PartialOrd<BytesMut> for String {
2787 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2788 other.partial_cmp(self)
2789 }
2790}
2791
2792impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
2793 where BytesMut: PartialEq<T>
2794{
2795 fn eq(&self, other: &&'a T) -> bool {
2796 *self == **other
2797 }
2798}
2799
2800impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
2801 where BytesMut: PartialOrd<T>
2802{
2803 fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
2804 self.partial_cmp(*other)
2805 }
2806}
2807
2808impl<'a> PartialEq<BytesMut> for &'a [u8] {
2809 fn eq(&self, other: &BytesMut) -> bool {
2810 *other == *self
2811 }
2812}
2813
2814impl<'a> PartialOrd<BytesMut> for &'a [u8] {
2815 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2816 other.partial_cmp(self)
2817 }
2818}
2819
2820impl<'a> PartialEq<BytesMut> for &'a str {
2821 fn eq(&self, other: &BytesMut) -> bool {
2822 *other == *self
2823 }
2824}
2825
2826impl<'a> PartialOrd<BytesMut> for &'a str {
2827 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
2828 other.partial_cmp(self)
2829 }
2830}
2831
2832impl PartialEq<[u8]> for Bytes {
2833 fn eq(&self, other: &[u8]) -> bool {
2834 self.inner.as_ref() == other
2835 }
2836}
2837
2838impl PartialOrd<[u8]> for Bytes {
2839 fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
2840 self.inner.as_ref().partial_cmp(other)
2841 }
2842}
2843
2844impl PartialEq<Bytes> for [u8] {
2845 fn eq(&self, other: &Bytes) -> bool {
2846 *other == *self
2847 }
2848}
2849
2850impl PartialOrd<Bytes> for [u8] {
2851 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2852 other.partial_cmp(self)
2853 }
2854}
2855
2856impl PartialEq<str> for Bytes {
2857 fn eq(&self, other: &str) -> bool {
2858 self.inner.as_ref() == other.as_bytes()
2859 }
2860}
2861
2862impl PartialOrd<str> for Bytes {
2863 fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
2864 self.inner.as_ref().partial_cmp(other.as_bytes())
2865 }
2866}
2867
2868impl PartialEq<Bytes> for str {
2869 fn eq(&self, other: &Bytes) -> bool {
2870 *other == *self
2871 }
2872}
2873
2874impl PartialOrd<Bytes> for str {
2875 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2876 other.partial_cmp(self)
2877 }
2878}
2879
2880impl PartialEq<Vec<u8>> for Bytes {
2881 fn eq(&self, other: &Vec<u8>) -> bool {
2882 *self == &other[..]
2883 }
2884}
2885
2886impl PartialOrd<Vec<u8>> for Bytes {
2887 fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
2888 self.inner.as_ref().partial_cmp(&other[..])
2889 }
2890}
2891
2892impl PartialEq<Bytes> for Vec<u8> {
2893 fn eq(&self, other: &Bytes) -> bool {
2894 *other == *self
2895 }
2896}
2897
2898impl PartialOrd<Bytes> for Vec<u8> {
2899 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2900 other.partial_cmp(self)
2901 }
2902}
2903
2904impl PartialEq<String> for Bytes {
2905 fn eq(&self, other: &String) -> bool {
2906 *self == &other[..]
2907 }
2908}
2909
2910impl PartialOrd<String> for Bytes {
2911 fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
2912 self.inner.as_ref().partial_cmp(other.as_bytes())
2913 }
2914}
2915
2916impl PartialEq<Bytes> for String {
2917 fn eq(&self, other: &Bytes) -> bool {
2918 *other == *self
2919 }
2920}
2921
2922impl PartialOrd<Bytes> for String {
2923 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2924 other.partial_cmp(self)
2925 }
2926}
2927
2928impl<'a> PartialEq<Bytes> for &'a [u8] {
2929 fn eq(&self, other: &Bytes) -> bool {
2930 *other == *self
2931 }
2932}
2933
2934impl<'a> PartialOrd<Bytes> for &'a [u8] {
2935 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2936 other.partial_cmp(self)
2937 }
2938}
2939
2940impl<'a> PartialEq<Bytes> for &'a str {
2941 fn eq(&self, other: &Bytes) -> bool {
2942 *other == *self
2943 }
2944}
2945
2946impl<'a> PartialOrd<Bytes> for &'a str {
2947 fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
2948 other.partial_cmp(self)
2949 }
2950}
2951
2952impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
2953 where Bytes: PartialEq<T>
2954{
2955 fn eq(&self, other: &&'a T) -> bool {
2956 *self == **other
2957 }
2958}
2959
2960impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
2961 where Bytes: PartialOrd<T>
2962{
2963 fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
2964 self.partial_cmp(&**other)
2965 }
2966}
2967
2968impl PartialEq<BytesMut> for Bytes
2969{
2970 fn eq(&self, other: &BytesMut) -> bool {
2971 &other[..] == &self[..]
2972 }
2973}
2974
2975impl PartialEq<Bytes> for BytesMut
2976{
2977 fn eq(&self, other: &Bytes) -> bool {
2978 &other[..] == &self[..]
2979 }
2980}
2981
2982 // While there is `std::process::abort`, it's only available in Rust 1.17, and
2983// our minimum supported version is currently 1.15. So, this acts as an abort
2984// by triggering a double panic, which always aborts in Rust.
2985struct Abort;
2986
2987impl Drop for Abort {
2988 fn drop(&mut self) {
2989 panic!();
2990 }
2991}
2992
2993#[inline(never)]
2994#[cold]
2995fn abort() {
2996 let _a = Abort;
2997 panic!();
2998}