use {IntoBuf, Buf, BufMut};
use buf::Iter;
use debug;
use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize};
use std::borrow::{Borrow, BorrowMut};
use std::io::Cursor;
use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
use std::iter::{FromIterator, Iterator};
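/// A reference-counted, immutable view into a contiguous slice of memory.
///
/// `Bytes` is cheap to clone: small buffers live inline in the handle itself
/// and are copied, while larger buffers share storage and bump a reference
/// count. Slicing and splitting hand out new views without copying the data.
///
/// Example (a sketch; the `use` path assumes this module is the root of the
/// `bytes` crate):
///
/// ```
/// use bytes::Bytes;
///
/// let a = Bytes::from(&b"hello world"[..]);
/// let b = a.slice(6, 11);
///
/// assert_eq!(&b[..], b"world");
/// ```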
pub struct Bytes {
inner: Inner,
}
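/// A unique, mutable view into a contiguous slice of memory.
///
/// `BytesMut` guarantees exclusive access to its buffer, so it can be written
/// through `BufMut`; `freeze` converts it into an immutable `Bytes` without
/// copying.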
pub struct BytesMut {
inner: Inner,
}
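// `Inner` is the four-word representation behind both handles. The `arc`
// word does double duty: its two low bits encode the storage kind (see the
// `KIND_*` constants below), and for small buffers the whole struct is
// reinterpreted as inline byte storage. The field order flips with
// endianness so the tag bits always land in a byte the inline data does not
// use: byte 0 on little-endian (data starts at offset 1), the final byte on
// big-endian (data starts at offset 0).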
#[cfg(target_endian = "little")]
#[repr(C)]
struct Inner {
arc: AtomicPtr<Shared>,
ptr: *mut u8,
len: usize,
cap: usize,
}
#[cfg(target_endian = "big")]
#[repr(C)]
struct Inner {
ptr: *mut u8,
len: usize,
cap: usize,
arc: AtomicPtr<Shared>,
}
struct Shared {
vec: Vec<u8>,
original_capacity_repr: usize,
ref_count: AtomicUsize,
}
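// Storage-kind tags held in the low two bits of the `arc` word. A `KIND_VEC`
// buffer packs extra metadata into the rest of the word: bits 2..5 store the
// compressed original capacity (see `original_capacity_to_repr`) and the
// bits at `VEC_POS_OFFSET` and above record how far `ptr` has advanced into
// the allocation, so the owning `Vec` can be rebuilt on drop.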
const KIND_ARC: usize = 0b00;
const KIND_INLINE: usize = 0b01;
const KIND_STATIC: usize = 0b10;
const KIND_VEC: usize = 0b11;
const KIND_MASK: usize = 0b11;
const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
const ORIGINAL_CAPACITY_OFFSET: usize = 2;
const VEC_POS_OFFSET: usize = 5;
const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
const NOT_VEC_POS_MASK: usize = 0b11111;
const INLINE_LEN_MASK: usize = 0b11111100;
const INLINE_LEN_OFFSET: usize = 2;
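// Inline buffers store data directly in the handle: every byte of the
// four-word struct except the tag byte, i.e. 31 bytes on 64-bit targets and
// 15 on 32-bit ones. The inline length shares the tag byte, in bits 2..8.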
#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 1;
#[cfg(target_endian = "big")]
const INLINE_DATA_OFFSET: isize = 0;
#[cfg(target_pointer_width = "64")]
const PTR_WIDTH: usize = 64;
#[cfg(target_pointer_width = "32")]
const PTR_WIDTH: usize = 32;
#[cfg(target_pointer_width = "64")]
const INLINE_CAP: usize = 4 * 8 - 1;
#[cfg(target_pointer_width = "32")]
const INLINE_CAP: usize = 4 * 4 - 1;
impl Bytes {
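/// Creates a new `Bytes` able to hold `capacity` bytes without reallocating.
/// Capacities up to `INLINE_CAP` use the inline representation; larger ones
/// allocate a vec up front.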
#[inline]
pub fn with_capacity(capacity: usize) -> Bytes {
Bytes {
inner: Inner::with_capacity(capacity),
}
}
#[inline]
pub fn new() -> Bytes {
Bytes::with_capacity(0)
}
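/// Creates a `Bytes` backed by the given static slice. Nothing is allocated
/// or reference-counted, and dropping the handle is a no-op.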
#[inline]
pub fn from_static(bytes: &'static [u8]) -> Bytes {
Bytes {
inner: Inner::from_static(bytes),
}
}
#[inline]
pub fn len(&self) -> usize {
self.inner.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
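/// Returns a new `Bytes` covering the range `begin..end` of `self`.
///
/// Short slices are copied into a fresh inline handle; longer ones share
/// storage with `self`. Either way the heap data is never duplicated.
///
/// Panics if `begin > end` or `end > self.len()`.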
pub fn slice(&self, begin: usize, end: usize) -> Bytes {
assert!(begin <= end);
assert!(end <= self.len());
if end - begin <= INLINE_CAP {
return Bytes::from(&self[begin..end]);
}
let mut ret = self.clone();
unsafe {
ret.inner.set_end(end);
ret.inner.set_start(begin);
}
ret
}
pub fn slice_from(&self, begin: usize) -> Bytes {
self.slice(begin, self.len())
}
pub fn slice_to(&self, end: usize) -> Bytes {
self.slice(0, end)
}
pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
let bytes_p = self.as_ptr() as usize;
let bytes_len = self.len();
let sub_p = subset.as_ptr() as usize;
let sub_len = subset.len();
assert!(sub_p >= bytes_p);
assert!(sub_p + sub_len <= bytes_p + bytes_len);
let sub_offset = sub_p - bytes_p;
self.slice(sub_offset, sub_offset + sub_len)
}
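/// Splits the buffer in two at the given index: returns `self[at..]` and
/// leaves `self[..at]` behind. Both halves share the underlying storage.
///
/// Panics if `at > self.len()`.
///
/// Example (sketch, assuming the `bytes` crate path):
///
/// ```
/// use bytes::Bytes;
///
/// let mut a = Bytes::from(&b"hello world"[..]);
/// let b = a.split_off(5);
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b" world");
/// ```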
pub fn split_off(&mut self, at: usize) -> Bytes {
assert!(at <= self.len());
if at == self.len() {
return Bytes::new();
}
if at == 0 {
return mem::replace(self, Bytes::new());
}
Bytes {
inner: self.inner.split_off(at),
}
}
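/// Splits the buffer in two at the given index: returns `self[..at]` and
/// leaves `self[at..]` behind; the mirror image of `split_off`.
///
/// Panics if `at > self.len()`.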
pub fn split_to(&mut self, at: usize) -> Bytes {
assert!(at <= self.len());
if at == self.len() {
return mem::replace(self, Bytes::new());
}
if at == 0 {
return Bytes::new();
}
Bytes {
inner: self.inner.split_to(at),
}
}
#[deprecated(since = "0.4.1", note = "use split_to instead")]
#[doc(hidden)]
pub fn drain_to(&mut self, at: usize) -> Bytes {
self.split_to(at)
}
pub fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
}
#[inline]
pub fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.len(), "cannot advance past `remaining`");
unsafe { self.inner.set_start(cnt); }
}
pub fn clear(&mut self) {
self.truncate(0);
}
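/// Attempts to convert `self` into a mutable `BytesMut`.
///
/// This succeeds only when the handle is sole owner of its storage (inline,
/// an unshared vec, or a shared buffer whose reference count is one);
/// otherwise `self` is handed back unchanged in `Err`. Static buffers can
/// never be made mutable.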
pub fn try_mut(mut self) -> Result<BytesMut, Bytes> {
if self.inner.is_mut_safe() {
Ok(BytesMut { inner: self.inner })
} else {
Err(self)
}
}
pub fn extend_from_slice(&mut self, extend: &[u8]) {
if extend.is_empty() {
return;
}
let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow");
let result = match mem::replace(self, Bytes::new()).try_mut() {
Ok(mut bytes_mut) => {
bytes_mut.extend_from_slice(extend);
bytes_mut
},
Err(bytes) => {
let mut bytes_mut = BytesMut::with_capacity(new_cap);
bytes_mut.put_slice(&bytes);
bytes_mut.put_slice(extend);
bytes_mut
}
};
*self = result.freeze();
}
}
impl IntoBuf for Bytes {
type Buf = Cursor<Self>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
impl<'a> IntoBuf for &'a Bytes {
type Buf = Cursor<Self>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
impl Clone for Bytes {
fn clone(&self) -> Bytes {
Bytes {
inner: unsafe { self.inner.shallow_clone(false) },
}
}
}
impl AsRef<[u8]> for Bytes {
#[inline]
fn as_ref(&self) -> &[u8] {
self.inner.as_ref()
}
}
impl ops::Deref for Bytes {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.inner.as_ref()
}
}
impl From<BytesMut> for Bytes {
fn from(src: BytesMut) -> Bytes {
src.freeze()
}
}
impl From<Vec<u8>> for Bytes {
fn from(src: Vec<u8>) -> Bytes {
BytesMut::from(src).freeze()
}
}
impl From<String> for Bytes {
fn from(src: String) -> Bytes {
BytesMut::from(src).freeze()
}
}
impl<'a> From<&'a [u8]> for Bytes {
fn from(src: &'a [u8]) -> Bytes {
BytesMut::from(src).freeze()
}
}
impl<'a> From<&'a str> for Bytes {
fn from(src: &'a str) -> Bytes {
BytesMut::from(src).freeze()
}
}
impl FromIterator<u8> for BytesMut {
fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
let iter = into_iter.into_iter();
let (min, maybe_max) = iter.size_hint();
let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
for i in iter {
out.reserve(1);
out.put(i);
}
out
}
}
impl FromIterator<u8> for Bytes {
fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
BytesMut::from_iter(into_iter).freeze()
}
}
impl<'a> FromIterator<&'a u8> for BytesMut {
fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
}
}
impl<'a> FromIterator<&'a u8> for Bytes {
fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
BytesMut::from_iter(into_iter).freeze()
}
}
impl PartialEq for Bytes {
fn eq(&self, other: &Bytes) -> bool {
self.inner.as_ref() == other.inner.as_ref()
}
}
impl PartialOrd for Bytes {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other.inner.as_ref())
}
}
impl Ord for Bytes {
fn cmp(&self, other: &Bytes) -> cmp::Ordering {
self.inner.as_ref().cmp(other.inner.as_ref())
}
}
impl Eq for Bytes {}
impl Default for Bytes {
#[inline]
fn default() -> Bytes {
Bytes::new()
}
}
impl fmt::Debug for Bytes {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
}
}
impl hash::Hash for Bytes {
fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
let s: &[u8] = self.as_ref();
s.hash(state);
}
}
impl Borrow<[u8]> for Bytes {
fn borrow(&self) -> &[u8] {
self.as_ref()
}
}
impl IntoIterator for Bytes {
type Item = u8;
type IntoIter = Iter<Cursor<Bytes>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
impl<'a> IntoIterator for &'a Bytes {
type Item = u8;
type IntoIter = Iter<Cursor<&'a Bytes>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
impl Extend<u8> for Bytes {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
let iter = iter.into_iter();
let (lower, upper) = iter.size_hint();
if let Some(0) = upper {
return;
}
let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
Ok(bytes_mut) => bytes_mut,
Err(bytes) => {
let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower);
bytes_mut.put_slice(&bytes);
bytes_mut
}
};
bytes_mut.extend(iter);
*self = bytes_mut.freeze();
}
}
impl<'a> Extend<&'a u8> for Bytes {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
self.extend(iter.into_iter().map(|b| *b))
}
}
impl BytesMut {
#[inline]
pub fn with_capacity(capacity: usize) -> BytesMut {
BytesMut {
inner: Inner::with_capacity(capacity),
}
}
#[inline]
pub fn new() -> BytesMut {
BytesMut::with_capacity(0)
}
#[inline]
pub fn len(&self) -> usize {
self.inner.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline]
pub fn capacity(&self) -> usize {
self.inner.capacity()
}
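/// Converts `self` into an immutable `Bytes` without copying or allocating.
///
/// Example (sketch, assuming the `bytes` crate path):
///
/// ```
/// use bytes::BytesMut;
///
/// let mut b = BytesMut::with_capacity(64);
/// b.extend_from_slice(b"abc");
/// assert_eq!(&b.freeze()[..], b"abc");
/// ```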
#[inline]
pub fn freeze(self) -> Bytes {
Bytes { inner: self.inner }
}
pub fn split_off(&mut self, at: usize) -> BytesMut {
BytesMut {
inner: self.inner.split_off(at),
}
}
pub fn take(&mut self) -> BytesMut {
let len = self.len();
self.split_to(len)
}
#[deprecated(since = "0.4.1", note = "use take instead")]
#[doc(hidden)]
pub fn drain(&mut self) -> BytesMut {
self.take()
}
pub fn split_to(&mut self, at: usize) -> BytesMut {
BytesMut {
inner: self.inner.split_to(at),
}
}
#[deprecated(since = "0.4.1", note = "use split_to instead")]
#[doc(hidden)]
pub fn drain_to(&mut self, at: usize) -> BytesMut {
self.split_to(at)
}
pub fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
}
#[inline]
pub fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.len(), "cannot advance past `remaining`");
unsafe { self.inner.set_start(cnt); }
}
pub fn clear(&mut self) {
self.truncate(0);
}
pub fn resize(&mut self, new_len: usize, value: u8) {
self.inner.resize(new_len, value);
}
pub unsafe fn set_len(&mut self, len: usize) {
self.inner.set_len(len)
}
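/// Reserves capacity for at least `additional` more bytes.
///
/// This is a no-op when spare capacity already suffices. Otherwise it may
/// reclaim space at the front of an unshared buffer (left behind by
/// `advance` or `split_to`) by shifting the data back, or grow by moving to
/// a larger allocation.
///
/// Example (sketch, assuming the `bytes` crate path):
///
/// ```
/// use bytes::BytesMut;
///
/// let mut buf = BytesMut::with_capacity(0);
/// buf.reserve(100);
/// assert!(buf.capacity() >= 100);
/// ```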
pub fn reserve(&mut self, additional: usize) {
self.inner.reserve(additional)
}
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.put_slice(extend);
}
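/// Absorbs `other`, which should previously have been split off from `self`.
///
/// If the two buffers are adjacent in memory and share the same ref-counted
/// storage they are rejoined in O(1) by widening `self`'s length and
/// capacity; otherwise `other`'s contents are appended by copying.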
pub fn unsplit(&mut self, other: BytesMut) {
let ptr;
if other.is_empty() {
return;
}
if self.is_empty() {
*self = other;
return;
}
unsafe {
ptr = self.inner.ptr.offset(self.inner.len as isize);
}
if ptr == other.inner.ptr &&
self.inner.kind() == KIND_ARC &&
other.inner.kind() == KIND_ARC
{
debug_assert_eq!(self.inner.arc.load(Acquire),
other.inner.arc.load(Acquire));
self.inner.len += other.inner.len;
self.inner.cap += other.inner.cap;
}
else {
self.extend_from_slice(&other);
}
}
}
impl BufMut for BytesMut {
#[inline]
fn remaining_mut(&self) -> usize {
self.capacity() - self.len()
}
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
let new_len = self.len() + cnt;
self.inner.set_len(new_len);
}
#[inline]
unsafe fn bytes_mut(&mut self) -> &mut [u8] {
let len = self.len();
&mut self.inner.as_raw()[len..]
}
#[inline]
fn put_slice(&mut self, src: &[u8]) {
assert!(self.remaining_mut() >= src.len());
let len = src.len();
unsafe {
self.bytes_mut()[..len].copy_from_slice(src);
self.advance_mut(len);
}
}
#[inline]
fn put_u8(&mut self, n: u8) {
self.inner.put_u8(n);
}
#[inline]
fn put_i8(&mut self, n: i8) {
self.put_u8(n as u8);
}
}
impl IntoBuf for BytesMut {
type Buf = Cursor<Self>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
impl<'a> IntoBuf for &'a BytesMut {
type Buf = Cursor<&'a BytesMut>;
fn into_buf(self) -> Self::Buf {
Cursor::new(self)
}
}
impl AsRef<[u8]> for BytesMut {
#[inline]
fn as_ref(&self) -> &[u8] {
self.inner.as_ref()
}
}
impl ops::Deref for BytesMut {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.as_ref()
}
}
impl AsMut<[u8]> for BytesMut {
fn as_mut(&mut self) -> &mut [u8] {
self.inner.as_mut()
}
}
impl ops::DerefMut for BytesMut {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
self.inner.as_mut()
}
}
impl From<Vec<u8>> for BytesMut {
fn from(src: Vec<u8>) -> BytesMut {
BytesMut {
inner: Inner::from_vec(src),
}
}
}
impl From<String> for BytesMut {
fn from(src: String) -> BytesMut {
BytesMut::from(src.into_bytes())
}
}
impl<'a> From<&'a [u8]> for BytesMut {
fn from(src: &'a [u8]) -> BytesMut {
let len = src.len();
if len == 0 {
BytesMut::new()
} else if len <= INLINE_CAP {
unsafe {
let mut inner: Inner = mem::uninitialized();
inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
inner.set_inline_len(len);
inner.as_raw()[0..len].copy_from_slice(src);
BytesMut {
inner: inner,
}
}
} else {
BytesMut::from(src.to_vec())
}
}
}
impl<'a> From<&'a str> for BytesMut {
fn from(src: &'a str) -> BytesMut {
BytesMut::from(src.as_bytes())
}
}
impl From<Bytes> for BytesMut {
fn from(src: Bytes) -> BytesMut {
src.try_mut()
.unwrap_or_else(|src| BytesMut::from(&src[..]))
}
}
impl PartialEq for BytesMut {
fn eq(&self, other: &BytesMut) -> bool {
self.inner.as_ref() == other.inner.as_ref()
}
}
impl PartialOrd for BytesMut {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other.inner.as_ref())
}
}
impl Ord for BytesMut {
fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
self.inner.as_ref().cmp(other.inner.as_ref())
}
}
impl Eq for BytesMut {}
impl Default for BytesMut {
#[inline]
fn default() -> BytesMut {
BytesMut::new()
}
}
impl fmt::Debug for BytesMut {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
}
}
impl hash::Hash for BytesMut {
fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
let s: &[u8] = self.as_ref();
s.hash(state);
}
}
impl Borrow<[u8]> for BytesMut {
fn borrow(&self) -> &[u8] {
self.as_ref()
}
}
impl BorrowMut<[u8]> for BytesMut {
fn borrow_mut(&mut self) -> &mut [u8] {
self.as_mut()
}
}
impl fmt::Write for BytesMut {
#[inline]
fn write_str(&mut self, s: &str) -> fmt::Result {
if self.remaining_mut() >= s.len() {
self.put_slice(s.as_bytes());
Ok(())
} else {
Err(fmt::Error)
}
}
#[inline]
fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
fmt::write(self, args)
}
}
impl Clone for BytesMut {
fn clone(&self) -> BytesMut {
BytesMut::from(&self[..])
}
}
impl IntoIterator for BytesMut {
type Item = u8;
type IntoIter = Iter<Cursor<BytesMut>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
impl<'a> IntoIterator for &'a BytesMut {
type Item = u8;
type IntoIter = Iter<Cursor<&'a BytesMut>>;
fn into_iter(self) -> Self::IntoIter {
self.into_buf().iter()
}
}
impl Extend<u8> for BytesMut {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
self.reserve(lower);
for b in iter {
// `size_hint` is allowed to under-report, so guarantee one spare byte
// before each write instead of trusting the bulk reservation above.
self.reserve(1);
unsafe {
self.bytes_mut()[0] = b;
self.advance_mut(1);
}
}
}
}
impl<'a> Extend<&'a u8> for BytesMut {
fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
self.extend(iter.into_iter().map(|b| *b))
}
}
impl Inner {
#[inline]
fn from_static(bytes: &'static [u8]) -> Inner {
let ptr = bytes.as_ptr() as *mut u8;
Inner {
arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
ptr: ptr,
len: bytes.len(),
cap: bytes.len(),
}
}
#[inline]
fn from_vec(mut src: Vec<u8>) -> Inner {
let len = src.len();
let cap = src.capacity();
let ptr = src.as_mut_ptr();
mem::forget(src);
let original_capacity_repr = original_capacity_to_repr(cap);
let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
Inner {
arc: AtomicPtr::new(arc as *mut Shared),
ptr: ptr,
len: len,
cap: cap,
}
}
#[inline]
fn with_capacity(capacity: usize) -> Inner {
if capacity <= INLINE_CAP {
unsafe {
let mut inner: Inner = mem::uninitialized();
inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
inner
}
} else {
Inner::from_vec(Vec::with_capacity(capacity))
}
}
#[inline]
fn as_ref(&self) -> &[u8] {
unsafe {
if self.is_inline() {
slice::from_raw_parts(self.inline_ptr(), self.inline_len())
} else {
slice::from_raw_parts(self.ptr, self.len)
}
}
}
#[inline]
fn as_mut(&mut self) -> &mut [u8] {
debug_assert!(!self.is_static());
unsafe {
if self.is_inline() {
slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
} else {
slice::from_raw_parts_mut(self.ptr, self.len)
}
}
}
#[inline]
unsafe fn as_raw(&mut self) -> &mut [u8] {
debug_assert!(!self.is_static());
if self.is_inline() {
slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
} else {
slice::from_raw_parts_mut(self.ptr, self.cap)
}
}
#[inline]
fn put_u8(&mut self, n: u8) {
if self.is_inline() {
let len = self.inline_len();
assert!(len < INLINE_CAP);
unsafe {
*self.inline_ptr().offset(len as isize) = n;
}
self.set_inline_len(len + 1);
} else {
assert!(self.len < self.cap);
unsafe {
*self.ptr.offset(self.len as isize) = n;
}
self.len += 1;
}
}
#[inline]
fn len(&self) -> usize {
if self.is_inline() {
self.inline_len()
} else {
self.len
}
}
#[inline]
unsafe fn inline_ptr(&self) -> *mut u8 {
(self as *const Inner as *mut Inner as *mut u8)
.offset(INLINE_DATA_OFFSET)
}
#[inline]
fn inline_len(&self) -> usize {
let p: &usize = unsafe { mem::transmute(&self.arc) };
(p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
}
#[inline]
fn set_inline_len(&mut self, len: usize) {
debug_assert!(len <= INLINE_CAP);
let p = self.arc.get_mut();
*p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
}
#[inline]
unsafe fn set_len(&mut self, len: usize) {
if self.is_inline() {
assert!(len <= INLINE_CAP);
self.set_inline_len(len);
} else {
assert!(len <= self.cap);
self.len = len;
}
}
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
#[inline]
fn capacity(&self) -> usize {
if self.is_inline() {
INLINE_CAP
} else {
self.cap
}
}
fn split_off(&mut self, at: usize) -> Inner {
let mut other = unsafe { self.shallow_clone(true) };
unsafe {
other.set_start(at);
self.set_end(at);
}
other
}
fn split_to(&mut self, at: usize) -> Inner {
let mut other = unsafe { self.shallow_clone(true) };
unsafe {
other.set_end(at);
self.set_start(at);
}
other
}
fn truncate(&mut self, len: usize) {
if len <= self.len() {
unsafe { self.set_len(len); }
}
}
fn resize(&mut self, new_len: usize, value: u8) {
let len = self.len();
if new_len > len {
let additional = new_len - len;
self.reserve(additional);
unsafe {
let dst = self.as_raw()[len..].as_mut_ptr();
ptr::write_bytes(dst, value, additional);
self.set_len(new_len);
}
} else {
self.truncate(new_len);
}
}
unsafe fn set_start(&mut self, start: usize) {
if start == 0 {
return;
}
let kind = self.kind();
if kind == KIND_INLINE {
assert!(start <= INLINE_CAP);
let len = self.inline_len();
if len <= start {
self.set_inline_len(0);
} else {
let new_len = len - start;
let dst = self.inline_ptr();
let src = (dst as *const u8).offset(start as isize);
ptr::copy(src, dst, new_len);
self.set_inline_len(new_len);
}
} else {
assert!(start <= self.cap);
if kind == KIND_VEC {
let (mut pos, prev) = self.uncoordinated_get_vec_pos();
pos += start;
if pos <= MAX_VEC_POS {
self.uncoordinated_set_vec_pos(pos, prev);
} else {
let _ = self.shallow_clone(true);
}
}
self.ptr = self.ptr.offset(start as isize);
if self.len >= start {
self.len -= start;
} else {
self.len = 0;
}
self.cap -= start;
}
}
unsafe fn set_end(&mut self, end: usize) {
debug_assert!(self.is_shared());
if self.is_inline() {
assert!(end <= INLINE_CAP);
let new_len = cmp::min(self.inline_len(), end);
self.set_inline_len(new_len);
} else {
assert!(end <= self.cap);
self.cap = end;
self.len = cmp::min(self.len, end);
}
}
fn is_mut_safe(&mut self) -> bool {
let kind = self.kind();
if kind == KIND_INLINE {
true
} else if kind == KIND_VEC {
true
} else if kind == KIND_STATIC {
false
} else {
unsafe { (**self.arc.get_mut()).is_unique() }
}
}
unsafe fn shallow_clone(&self, mut_self: bool) -> Inner {
if self.is_inline_or_static() {
let mut inner: Inner = mem::uninitialized();
ptr::copy_nonoverlapping(
self,
&mut inner,
1,
);
inner
} else {
self.shallow_clone_sync(mut_self)
}
}
#[cold]
unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner {
let arc = self.arc.load(Acquire);
let kind = arc as usize & KIND_MASK;
if kind == KIND_ARC {
self.shallow_clone_arc(arc)
} else {
assert!(kind == KIND_VEC);
self.shallow_clone_vec(arc as usize, mut_self)
}
}
unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner {
debug_assert!(arc as usize & KIND_MASK == KIND_ARC);
let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
if old_size == usize::MAX {
abort();
}
Inner {
arc: AtomicPtr::new(arc),
.. *self
}
}
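// Cloning a KIND_VEC buffer promotes it to KIND_ARC: the vec is moved into a
// boxed `Shared` whose count starts at 2, one for each resulting handle.
// Without `&mut self` (`mut_self == false`) the promotion can race other
// threads doing the same, hence the compare-and-swap below; the loser
// deallocates its `Shared` box without dropping the vec inside (the winner
// still owns that buffer) and clones the winner's arc instead.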
#[cold]
unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner {
debug_assert!(arc & KIND_MASK == KIND_VEC);
let original_capacity_repr =
(arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
let off = (arc as usize) >> VEC_POS_OFFSET;
let shared = Box::new(Shared {
vec: rebuild_vec(self.ptr, self.len, self.cap, off),
original_capacity_repr: original_capacity_repr,
ref_count: AtomicUsize::new(2),
});
let shared = Box::into_raw(shared);
debug_assert!(0 == (shared as usize & 0b11));
if mut_self {
self.arc.store(shared, Relaxed);
return Inner {
arc: AtomicPtr::new(shared),
.. *self
};
}
let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel);
if actual as usize == arc {
return Inner {
arc: AtomicPtr::new(shared),
.. *self
};
}
let shared = Box::from_raw(shared);
mem::forget(*shared);
self.shallow_clone_arc(actual)
}
#[inline]
fn reserve(&mut self, additional: usize) {
let len = self.len();
let rem = self.capacity() - len;
if additional <= rem {
return;
}
let kind = self.kind();
if kind == KIND_INLINE {
let new_cap = len + additional;
let mut v = Vec::with_capacity(new_cap);
v.extend_from_slice(self.as_ref());
self.ptr = v.as_mut_ptr();
self.len = v.len();
self.cap = v.capacity();
self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);
mem::forget(v);
return;
}
if kind == KIND_VEC {
unsafe {
let (off, prev) = self.uncoordinated_get_vec_pos();
if off >= additional && off >= (self.cap / 2) {
let base_ptr = self.ptr.offset(-(off as isize));
ptr::copy(self.ptr, base_ptr, self.len);
self.ptr = base_ptr;
self.uncoordinated_set_vec_pos(0, prev);
self.cap += off;
} else {
let mut v = rebuild_vec(self.ptr, self.len, self.cap, off);
v.reserve(additional);
self.ptr = v.as_mut_ptr().offset(off as isize);
self.len = v.len() - off;
self.cap = v.capacity() - off;
mem::forget(v);
}
return;
}
}
let arc = *self.arc.get_mut();
debug_assert!(kind == KIND_ARC);
let mut new_cap = len + additional;
let original_capacity;
let original_capacity_repr;
unsafe {
original_capacity_repr = (*arc).original_capacity_repr;
original_capacity = original_capacity_from_repr(original_capacity_repr);
if (*arc).is_unique() {
let v = &mut (*arc).vec;
if v.capacity() >= new_cap {
let ptr = v.as_mut_ptr();
ptr::copy(self.ptr, ptr, len);
self.ptr = ptr;
self.cap = v.capacity();
return;
}
new_cap = cmp::max(
cmp::max(v.capacity() << 1, new_cap),
original_capacity);
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
}
let mut v = Vec::with_capacity(new_cap);
v.extend_from_slice(self.as_ref());
release_shared(arc);
self.ptr = v.as_mut_ptr();
self.len = v.len();
self.cap = v.capacity();
let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
self.arc = AtomicPtr::new(arc as *mut Shared);
mem::forget(v);
}
#[inline]
fn is_inline(&self) -> bool {
self.kind() == KIND_INLINE
}
#[inline]
fn is_inline_or_static(&self) -> bool {
let kind = self.kind();
kind == KIND_INLINE || kind == KIND_STATIC
}
#[inline]
fn is_shared(&mut self) -> bool {
match self.kind() {
KIND_VEC => false,
_ => true,
}
}
#[inline]
fn is_static(&mut self) -> bool {
match self.kind() {
KIND_STATIC => true,
_ => false,
}
}
#[inline]
fn kind(&self) -> usize {
#[cfg(target_endian = "little")]
#[inline]
fn imp(arc: &AtomicPtr<Shared>) -> usize {
unsafe {
let p: *const u8 = mem::transmute(arc);
(*p as usize) & KIND_MASK
}
}
#[cfg(target_endian = "big")]
#[inline]
fn imp(arc: &AtomicPtr<Shared>) -> usize {
unsafe {
let p: *const usize = mem::transmute(arc);
*p & KIND_MASK
}
}
imp(&self.arc)
}
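// The two accessors below reinterpret the `arc` word directly, without
// atomic synchronization. That is sound only in KIND_VEC mode, i.e. while
// no `Shared` has been published for other handles or threads to see.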
#[inline]
fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) {
let prev = unsafe {
let p: &AtomicPtr<Shared> = &self.arc;
let p: *const usize = mem::transmute(p);
*p
};
(prev >> VEC_POS_OFFSET, prev)
}
#[inline]
fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) {
debug_assert!(pos <= MAX_VEC_POS);
unsafe {
let p: &mut AtomicPtr<Shared> = &mut self.arc;
let p: &mut usize = mem::transmute(p);
*p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
}
}
}
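// Reconstitutes the original `Vec` from a possibly advanced view: walks
// `ptr` back by `off` bytes and widens `len` and `cap` to match. The caller
// must guarantee the arguments describe a live vec-backed allocation.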
fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
unsafe {
let ptr = ptr.offset(-(off as isize));
len += off;
cap += off;
Vec::from_raw_parts(ptr, len, cap)
}
}
impl Drop for Inner {
fn drop(&mut self) {
let kind = self.kind();
if kind == KIND_VEC {
let (off, _) = self.uncoordinated_get_vec_pos();
let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
} else if kind == KIND_ARC {
release_shared(*self.arc.get_mut());
}
}
}
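// Drops one reference to a `Shared`, freeing it (vec included) when the last
// handle goes away. The Release decrement paired with the Acquire fence
// mirrors `Arc`: it ensures every other handle's writes are visible before
// the buffer is deallocated.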
fn release_shared(ptr: *mut Shared) {
unsafe {
if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
return;
}
atomic::fence(Acquire);
Box::from_raw(ptr);
}
}
impl Shared {
fn is_unique(&self) -> bool {
self.ref_count.load(Acquire) == 1
}
}
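// The "original capacity" is remembered in 3 bits so a buffer that shrinks
// through repeated `split_to` calls can be regrown to a sensible size.
// Repr 0 means the capacity was under 2^10 (not worth remembering); reprs
// 1 through 7 decode to powers of two from 2^10 up to 2^16, saturating at
// the top.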
fn original_capacity_to_repr(cap: usize) -> usize {
let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
}
fn original_capacity_from_repr(repr: usize) -> usize {
if repr == 0 {
return 0;
}
1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
}
#[test]
fn test_original_capacity_to_repr() {
assert_eq!(original_capacity_to_repr(0), 0);
let max_width = 32;
for width in 1..(max_width + 1) {
let cap = 1 << (width - 1);
let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
0
} else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
width - MIN_ORIGINAL_CAPACITY_WIDTH
} else {
MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
};
assert_eq!(original_capacity_to_repr(cap), expected);
if width > 1 {
assert_eq!(original_capacity_to_repr(cap + 1), expected);
}
if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
assert_eq!(original_capacity_to_repr(cap + 76), expected);
} else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
}
}
}
#[test]
fn test_original_capacity_from_repr() {
assert_eq!(0, original_capacity_from_repr(0));
let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
assert_eq!(min_cap, original_capacity_from_repr(1));
assert_eq!(min_cap * 2, original_capacity_from_repr(2));
assert_eq!(min_cap * 4, original_capacity_from_repr(3));
assert_eq!(min_cap * 8, original_capacity_from_repr(4));
assert_eq!(min_cap * 16, original_capacity_from_repr(5));
assert_eq!(min_cap * 32, original_capacity_from_repr(6));
assert_eq!(min_cap * 64, original_capacity_from_repr(7));
}
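// An end-to-end sanity check added for illustration (the test name and
// buffer contents are arbitrary): split a heap-backed buffer in two, then
// rejoin the halves without copying.
#[test]
fn test_split_off_then_unsplit() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"hello world");
let tail = buf.split_off(5);
assert_eq!(&buf[..], b"hello");
assert_eq!(&tail[..], b" world");
// The halves are contiguous and share one arc, so this path is O(1).
buf.unsplit(tail);
assert_eq!(&buf[..], b"hello world");
}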
unsafe impl Send for Inner {}
unsafe impl Sync for Inner {}
impl PartialEq<[u8]> for BytesMut {
fn eq(&self, other: &[u8]) -> bool {
&**self == other
}
}
impl PartialOrd<[u8]> for BytesMut {
fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
(**self).partial_cmp(other)
}
}
impl PartialEq<BytesMut> for [u8] {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl PartialOrd<BytesMut> for [u8] {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<str> for BytesMut {
fn eq(&self, other: &str) -> bool {
&**self == other.as_bytes()
}
}
impl PartialOrd<str> for BytesMut {
fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
(**self).partial_cmp(other.as_bytes())
}
}
impl PartialEq<BytesMut> for str {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl PartialOrd<BytesMut> for str {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<Vec<u8>> for BytesMut {
fn eq(&self, other: &Vec<u8>) -> bool {
*self == &other[..]
}
}
impl PartialOrd<Vec<u8>> for BytesMut {
fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
(**self).partial_cmp(&other[..])
}
}
impl PartialEq<BytesMut> for Vec<u8> {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl PartialOrd<BytesMut> for Vec<u8> {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<String> for BytesMut {
fn eq(&self, other: &String) -> bool {
*self == &other[..]
}
}
impl PartialOrd<String> for BytesMut {
fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
(**self).partial_cmp(other.as_bytes())
}
}
impl PartialEq<BytesMut> for String {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl PartialOrd<BytesMut> for String {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
where BytesMut: PartialEq<T>
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
}
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
where BytesMut: PartialOrd<T>
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(*other)
}
}
impl<'a> PartialEq<BytesMut> for &'a [u8] {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl<'a> PartialOrd<BytesMut> for &'a [u8] {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl<'a> PartialEq<BytesMut> for &'a str {
fn eq(&self, other: &BytesMut) -> bool {
*other == *self
}
}
impl<'a> PartialOrd<BytesMut> for &'a str {
fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<[u8]> for Bytes {
fn eq(&self, other: &[u8]) -> bool {
self.inner.as_ref() == other
}
}
impl PartialOrd<[u8]> for Bytes {
fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other)
}
}
impl PartialEq<Bytes> for [u8] {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl PartialOrd<Bytes> for [u8] {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<str> for Bytes {
fn eq(&self, other: &str) -> bool {
self.inner.as_ref() == other.as_bytes()
}
}
impl PartialOrd<str> for Bytes {
fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other.as_bytes())
}
}
impl PartialEq<Bytes> for str {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl PartialOrd<Bytes> for str {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<Vec<u8>> for Bytes {
fn eq(&self, other: &Vec<u8>) -> bool {
*self == &other[..]
}
}
impl PartialOrd<Vec<u8>> for Bytes {
fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(&other[..])
}
}
impl PartialEq<Bytes> for Vec<u8> {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl PartialOrd<Bytes> for Vec<u8> {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl PartialEq<String> for Bytes {
fn eq(&self, other: &String) -> bool {
*self == &other[..]
}
}
impl PartialOrd<String> for Bytes {
fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
self.inner.as_ref().partial_cmp(other.as_bytes())
}
}
impl PartialEq<Bytes> for String {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl PartialOrd<Bytes> for String {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl<'a> PartialEq<Bytes> for &'a [u8] {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl<'a> PartialOrd<Bytes> for &'a [u8] {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl<'a> PartialEq<Bytes> for &'a str {
fn eq(&self, other: &Bytes) -> bool {
*other == *self
}
}
impl<'a> PartialOrd<Bytes> for &'a str {
fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
other.partial_cmp(self)
}
}
impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
where Bytes: PartialEq<T>
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
}
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
where Bytes: PartialOrd<T>
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(&**other)
}
}
impl PartialEq<BytesMut> for Bytes {
fn eq(&self, other: &BytesMut) -> bool {
&other[..] == &self[..]
}
}
impl PartialEq<Bytes> for BytesMut {
fn eq(&self, other: &Bytes) -> bool {
&other[..] == &self[..]
}
}
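// Aborting without `std::process::abort` (presumably for older toolchains):
// panicking while the `Abort` guard is live panics again during unwinding,
// which the runtime escalates into a process abort. Used when the reference
// count overflows, mirroring `Arc`.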
struct Abort;
impl Drop for Abort {
fn drop(&mut self) {
panic!();
}
}
#[inline(never)]
#[cold]
fn abort() {
let _a = Abort;
panic!();
}