use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::sync::atomic::{self, AtomicUsize};
use std::{cmp, mem, ptr, ptr::NonNull, slice};
use crate::BytesMut;
#[cfg(target_endian = "little")]
#[repr(C)]
// Byte storage with a small-buffer (inline) optimization.
//
// `arc` is a tagged word: its two low bits hold one of the KIND_*
// discriminants. For KIND_INLINE the payload is stored in the struct's
// own bytes starting at INLINE_DATA_OFFSET, so the field order is
// endian-dependent (on little-endian `arc` comes first so the tag+len
// bytes sit at offsets 0..2 and the payload follows); #[repr(C)] pins
// that layout.
pub(crate) struct Storage {
    // Tagged pointer, or inline tag+length word (see KIND_* constants).
    arc: NonNull<Shared>,
    // Start of the logical bytes (unused by the inline representation).
    ptr: *mut u8,
    // Initialized length (unused by the inline representation).
    len: usize,
    // Usable capacity in bytes (unused by the inline representation).
    cap: usize,
}
#[cfg(target_endian = "big")]
#[repr(C)]
// Big-endian variant of `Storage`: the inline payload must start at
// offset 0 (INLINE_DATA_OFFSET == 0) so the tag word `arc` is placed
// last — its low bits (the KIND_* tag and inline length) then occupy the
// final bytes of the struct, out of the payload's way.
pub(crate) struct Storage {
    ptr: *mut u8,
    len: usize,
    cap: usize,
    // Tagged pointer, or inline tag+length word (see KIND_* constants).
    arc: NonNull<Shared>,
}
// Heap allocation backing KIND_ARC storage: the owning Vec plus an
// atomic reference count (same lifecycle protocol as std::sync::Arc).
struct Shared {
    vec: Vec<u8>,
    ref_count: AtomicUsize,
}
#[repr(C)]
// Header of a KIND_VEC allocation. The buffer is allocated as a
// Vec<SharedVec> (see SharedVec::create); element 0 is this header and
// the payload bytes start immediately after it, at `offset` bytes from
// the allocation start (initially SHARED_VEC_SIZE).
struct SharedVec {
    cap: usize,       // total allocation size in bytes, header included
    len: u32,         // initialized payload length
    offset: u32,      // payload start, in bytes from the allocation start
    ref_count: AtomicUsize,
}
pub(crate) struct StorageVec(NonNull<SharedVec>);
// Representation discriminants, stored in the two low bits of the value
// held in `Storage::arc`. A real `Shared` pointer is aligned, so its low
// bits are naturally 0b00 (KIND_ARC); the other kinds are tags written
// into the word directly.
const KIND_ARC: usize = 0b00;
const KIND_INLINE: usize = 0b01;
const KIND_STATIC: usize = 0b10;
const KIND_VEC: usize = 0b11;
const KIND_MASK: usize = 0b11;
// Mask that clears the tag bits, recovering an untagged pointer.
const KIND_UNMASK: usize = !KIND_MASK;
// Size in bytes of the SharedVec header (also the allocation unit).
const SHARED_VEC_SIZE: usize = mem::size_of::<SharedVec>();
// For the inline representation the same tag word also carries the
// length, in bits 2..=7 (so INLINE_CAP must stay < 64).
const INLINE_LEN_MASK: usize = 0b1111_1100;
const INLINE_LEN_OFFSET: usize = 2;
// Offset of the inline payload within `Storage`: on little-endian the
// tag+len bytes come first, on big-endian the payload starts at 0 and
// the tag word sits at the end of the struct.
#[cfg(target_endian = "little")]
const INLINE_DATA_OFFSET: isize = 2;
#[cfg(target_endian = "big")]
const INLINE_DATA_OFFSET: isize = 0;
// Inline capacity: the struct's four words minus the two tag/len bytes.
#[cfg(target_pointer_width = "64")]
pub(crate) const INLINE_CAP: usize = 4 * 8 - 2;
#[cfg(target_pointer_width = "32")]
pub(crate) const INLINE_CAP: usize = 4 * 4 - 2;
impl Storage {
    /// Empty storage in the inline representation; allocation-free and
    /// usable in `const` contexts.
    #[inline]
    pub(crate) const fn empty() -> Storage {
        Storage {
            // `arc` carries only the KIND_INLINE tag (inline length 0).
            arc: unsafe { NonNull::new_unchecked(KIND_INLINE as *mut Shared) },
            ptr: ptr::null_mut::<u8>(),
            len: 0,
            cap: 0,
        }
    }

    /// Empty heap-backed (KIND_VEC) storage with at least `INLINE_CAP`
    /// bytes of capacity.
    ///
    /// NOTE(review): despite the name this is *not* the inline
    /// representation — it always allocates.
    #[inline]
    pub(crate) fn empty_inline() -> Storage {
        Storage::from_slice_with_capacity(INLINE_CAP, &[])
    }

    /// Empty heap-backed storage able to hold at least `capacity` bytes.
    #[inline]
    pub(crate) fn with_capacity(capacity: usize) -> Storage {
        Storage::from_slice_with_capacity(capacity, &[])
    }

    /// Borrows a `'static` slice without copying (KIND_STATIC).
    #[inline]
    pub(crate) const fn from_static(bytes: &'static [u8]) -> Storage {
        let ptr = bytes.as_ptr() as *mut u8;
        Storage {
            // Tag-only value; the bytes live in static memory and are
            // never freed (Drop ignores KIND_STATIC).
            arc: unsafe { NonNull::new_unchecked(KIND_STATIC as *mut Shared) },
            ptr,
            len: bytes.len(),
            cap: bytes.len(),
        }
    }

    /// Takes ownership of `vec`, placing it behind an atomically
    /// reference-counted `Shared` allocation (KIND_ARC).
    #[inline]
    pub(crate) fn from_vec(mut vec: Vec<u8>) -> Storage {
        let len = vec.len();
        let cap = vec.capacity();
        let ptr = vec.as_mut_ptr();
        let shared = Box::into_raw(Box::new(Shared {
            vec,
            ref_count: AtomicUsize::new(1),
        }));
        // A real `Shared` allocation is word-aligned, so its two low
        // bits are clear — i.e. the pointer already reads as KIND_ARC.
        debug_assert!(0 == (shared as usize & KIND_MASK));
        Storage {
            ptr,
            len,
            cap,
            arc: unsafe { NonNull::new_unchecked(shared) },
        }
    }

    /// Copies `src`, choosing the inline representation when it fits.
    #[inline]
    pub(crate) fn from_slice(src: &[u8]) -> Storage {
        if src.len() <= INLINE_CAP {
            unsafe { Storage::from_ptr_inline(src.as_ptr(), src.len()) }
        } else {
            Storage::from_slice_with_capacity(src.len(), src)
        }
    }

    /// Copies `src` into the inline representation; callers must ensure
    /// `src.len() <= INLINE_CAP`.
    #[inline]
    fn from_slice_inline(src: &[u8]) -> Storage {
        unsafe { Storage::from_ptr_inline(src.as_ptr(), src.len()) }
    }

    /// Copies `src` into a freshly allocated SharedVec (KIND_VEC) with
    /// room for at least `cap` bytes.
    #[allow(warnings)]
    #[inline]
    fn from_slice_with_capacity(cap: usize, src: &[u8]) -> Storage {
        let ptr = SharedVec::create(cap, src);
        unsafe {
            // `(*ptr).cap` counts the whole allocation; subtract the
            // header to get the usable payload capacity.
            let cap = (*ptr).cap - SHARED_VEC_SIZE;
            // XOR acts as OR here: the allocation is aligned so the two
            // low bits start out clear and end up holding KIND_VEC.
            let arc = NonNull::new_unchecked((ptr as usize ^ KIND_VEC) as *mut Shared);
            Storage {
                cap,
                arc,
                // The payload begins right after the header element.
                ptr: ptr.add(1) as *mut u8,
                len: src.len(),
            }
        }
    }

    /// Builds an inline storage from a raw pointer and length.
    ///
    /// # Safety
    /// `src` must be readable for `len` bytes and `len <= INLINE_CAP`.
    #[inline]
    unsafe fn from_ptr_inline(src: *const u8, len: usize) -> Storage {
        let mut inner = Storage {
            arc: NonNull::new_unchecked(KIND_INLINE as *mut Shared),
            ptr: ptr::null_mut(),
            len: 0,
            cap: 0,
        };
        let dst = inner.inline_ptr();
        // `copy` rather than `copy_nonoverlapping`: callers often pass a
        // pointer into the very Storage that will be overwritten with
        // the result (e.g. `*self = from_ptr_inline(self.as_ptr(), ..)`).
        ptr::copy(src, dst, len);
        inner.set_inline_len(len);
        inner
    }

    /// Read-only view of the logical bytes.
    #[inline]
    pub(crate) fn as_ref(&self) -> &[u8] {
        unsafe {
            if self.is_inline() {
                slice::from_raw_parts(self.inline_ptr_ro(), self.inline_len())
            } else {
                slice::from_raw_parts(self.ptr, self.len)
            }
        }
    }

    /// Mutable view of the logical bytes. Must not be called on
    /// KIND_STATIC storage (those bytes are immutable).
    #[inline]
    pub(crate) fn as_mut(&mut self) -> &mut [u8] {
        debug_assert!(self.kind() != KIND_STATIC);
        unsafe {
            if self.is_inline() {
                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
            } else {
                slice::from_raw_parts_mut(self.ptr, self.len)
            }
        }
    }

    /// Mutable view over the whole capacity, including bytes past `len`.
    ///
    /// # Safety
    /// The tail past `len` may be uninitialized; callers must write it
    /// before reading.
    #[inline]
    pub(crate) unsafe fn as_raw(&mut self) -> &mut [u8] {
        debug_assert!(self.kind() != KIND_STATIC);
        if self.is_inline() {
            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
        } else {
            slice::from_raw_parts_mut(self.ptr, self.cap)
        }
    }

    /// Pointer to the first logical byte for either representation.
    #[inline]
    unsafe fn as_ptr(&mut self) -> *mut u8 {
        if self.is_inline() {
            self.inline_ptr()
        } else {
            self.ptr
        }
    }

    /// Appends one byte; panics if there is no spare capacity.
    #[inline]
    pub(crate) fn put_u8(&mut self, n: u8) {
        if self.is_inline() {
            let len = self.inline_len();
            assert!(len < INLINE_CAP);
            unsafe {
                *self.inline_ptr().add(len) = n;
            }
            self.set_inline_len(len + 1);
        } else {
            assert!(self.len < self.cap);
            unsafe {
                *self.ptr.add(self.len) = n;
            }
            self.len += 1;
        }
    }

    /// Number of logical bytes.
    #[inline]
    pub(crate) fn len(&self) -> usize {
        if self.is_inline() {
            self.inline_len()
        } else {
            self.len
        }
    }

    /// Pointer to the inline payload — the struct's own bytes starting
    /// at INLINE_DATA_OFFSET. Only meaningful for KIND_INLINE.
    #[inline]
    unsafe fn inline_ptr(&mut self) -> *mut u8 {
        (self as *mut Storage as *mut u8).offset(INLINE_DATA_OFFSET)
    }

    /// Read-only counterpart of `inline_ptr`.
    #[inline]
    unsafe fn inline_ptr_ro(&self) -> *const u8 {
        (self as *const Storage as *const u8).offset(INLINE_DATA_OFFSET)
    }

    /// Inline length, decoded from bits 2..=7 of the tag word.
    #[inline]
    fn inline_len(&self) -> usize {
        (self.arc.as_ptr() as usize & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
    }

    /// Stores `len` into the tag word, preserving the kind bits.
    #[inline]
    fn set_inline_len(&mut self, len: usize) {
        debug_assert!(len <= INLINE_CAP);
        self.arc = unsafe {
            NonNull::new_unchecked(
                ((self.arc.as_ptr() as usize & !INLINE_LEN_MASK)
                    | (len << INLINE_LEN_OFFSET)) as _,
            )
        };
    }

    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Usable capacity in bytes for the current representation.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        if self.is_inline() {
            INLINE_CAP
        } else {
            self.cap
        }
    }

    /// Splits at `at`: returns the tail `[at, len)`, keeping the head
    /// `[0, at)` in `self`. With `create_inline`, either half is
    /// converted to the inline representation when it fits.
    pub(crate) fn split_off(&mut self, at: usize, create_inline: bool) -> Storage {
        let other = unsafe {
            if create_inline && self.len() - at <= INLINE_CAP {
                Storage::from_ptr_inline(self.as_ptr().add(at), self.len() - at)
            } else {
                // Share the allocation and advance the new handle's view.
                let mut other = self.shallow_clone();
                other.set_start(at);
                other
            }
        };
        unsafe {
            if create_inline && at <= INLINE_CAP {
                *self = Storage::from_ptr_inline(self.as_ptr(), at);
            } else {
                self.set_end(at);
            }
        }
        other
    }

    /// Splits at `at`: returns the head `[0, at)`, keeping the tail
    /// `[at, len)` in `self`.
    pub(crate) fn split_to(&mut self, at: usize, create_inline: bool) -> Storage {
        let other = unsafe {
            if create_inline && at <= INLINE_CAP {
                Storage::from_ptr_inline(self.as_ptr(), at)
            } else {
                let mut other = self.shallow_clone();
                other.set_end(at);
                other
            }
        };
        unsafe {
            if create_inline && self.len() - at <= INLINE_CAP {
                *self = Storage::from_ptr_inline(self.as_ptr().add(at), self.len() - at);
            } else {
                self.set_start(at);
            }
        }
        other
    }

    /// Shortens to `len` bytes (no-op if already shorter), optionally
    /// collapsing into the inline representation.
    pub(crate) fn truncate(&mut self, len: usize, create_inline: bool) {
        unsafe {
            if len <= self.len() {
                // NOTE(review): `<` (not `<=`) INLINE_CAP here, unlike
                // the other inline-eligibility checks — confirm intent.
                if create_inline && len < INLINE_CAP {
                    *self = Storage::from_ptr_inline(self.as_ptr(), len);
                } else {
                    self.set_len(len);
                }
            }
        }
    }

    /// Shrinks memory usage: inlines small contents, and reallocates a
    /// heap-backed buffer that wastes 64 or more bytes of capacity.
    pub(crate) fn trimdown(&mut self) {
        let kind = self.kind();
        if !(kind == KIND_INLINE || kind == KIND_STATIC) {
            if self.len() <= INLINE_CAP {
                *self = unsafe { Storage::from_ptr_inline(self.as_ptr(), self.len()) };
            } else if self.capacity() - self.len() >= 64 {
                *self = Storage::from_slice_with_capacity(self.len(), self.as_ref());
            }
        }
    }

    /// Grows to `new_len` filling the new bytes with `value`, or
    /// truncates when `new_len` is not larger.
    pub(crate) fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.as_raw()[len..].as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len, false);
        }
    }

    /// Converts into an immutable storage, inlining when small enough
    /// (dropping the original allocation in that case).
    #[inline]
    pub(crate) fn freeze(self) -> Storage {
        if self.len() <= INLINE_CAP {
            Storage::from_slice_inline(self.as_ref())
        } else {
            self
        }
    }

    /// Sets the logical length without touching the bytes.
    ///
    /// # Safety
    /// All bytes in `..len` must be initialized.
    #[inline]
    pub(crate) unsafe fn set_len(&mut self, len: usize) {
        if self.is_inline() {
            assert!(len <= INLINE_CAP);
            self.set_inline_len(len);
        } else {
            assert!(len <= self.cap);
            self.len = len;
        }
    }

    /// Drops the first `start` bytes by advancing the view.
    ///
    /// # Safety
    /// `start` must not exceed the representation's capacity.
    pub(crate) unsafe fn set_start(&mut self, start: usize) {
        if start == 0 {
            return;
        }
        let kind = self.kind();
        if kind == KIND_INLINE {
            assert!(start <= INLINE_CAP);
            let len = self.inline_len();
            if len <= start {
                self.set_inline_len(0);
            } else {
                // Inline bytes cannot be "advanced" by moving a pointer,
                // so shift the remainder down to offset 0.
                let new_len = len - start;
                let dst = self.inline_ptr();
                let src = (dst as *const u8).add(start);
                ptr::copy(src, dst, new_len);
                self.set_inline_len(new_len);
            }
        } else {
            assert!(start <= self.cap);
            self.ptr = self.ptr.add(start);
            // `len` saturates at zero when the cut is past the
            // initialized region (but still within capacity).
            if self.len >= start {
                self.len -= start;
            } else {
                self.len = 0;
            }
            self.cap -= start;
        }
    }

    /// Caps the view at `end` bytes of capacity (and at most `end`
    /// initialized bytes).
    ///
    /// # Safety
    /// `end` must not exceed the representation's capacity.
    pub(crate) unsafe fn set_end(&mut self, end: usize) {
        if self.is_inline() {
            assert!(end <= INLINE_CAP);
            let new_len = cmp::min(self.inline_len(), end);
            self.set_inline_len(new_len);
        } else {
            assert!(end <= self.cap);
            self.cap = end;
            self.len = cmp::min(self.len, end);
        }
    }

    /// Whether mutating through this handle cannot be observed
    /// elsewhere: inline is always safe, static never, and shared
    /// storage only while this is the sole reference.
    pub(crate) fn is_mut_safe(&self) -> bool {
        let kind = self.kind();
        if kind == KIND_INLINE {
            true
        } else if kind == KIND_STATIC {
            false
        } else if kind == KIND_VEC {
            unsafe { (*self.shared_vec()).is_unique() }
        } else {
            unsafe { (*self.arc.as_ptr()).is_unique() }
        }
    }

    /// Cheap clone: bitwise copy for inline/static, reference-count
    /// bump for the shared representations. Never copies the payload.
    unsafe fn shallow_clone(&self) -> Storage {
        let kind = self.kind();
        if kind == KIND_INLINE || kind == KIND_STATIC {
            // These representations own no shared state, so a plain
            // memcpy of the struct is a complete clone.
            let mut inner: mem::MaybeUninit<Storage> = mem::MaybeUninit::uninit();
            ptr::copy_nonoverlapping(self, inner.as_mut_ptr(), 1);
            inner.assume_init()
        } else {
            self.shallow_clone_sync()
        }
    }

    /// Refcount-bumping clone for KIND_ARC / KIND_VEC.
    #[cold]
    unsafe fn shallow_clone_sync(&self) -> Storage {
        let arc: *mut Shared = self.arc.as_ptr();
        let kind = arc as usize & KIND_MASK;
        if kind == KIND_ARC {
            // Relaxed suffices for an increment: a new reference can
            // only be created through an existing one (the same argument
            // std::sync::Arc makes for its clone).
            let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
            if old_size == usize::MAX {
                // Refcount overflow: abort rather than risk a later
                // use-after-free.
                abort();
            }
            Storage {
                arc: NonNull::new_unchecked(arc),
                ..*self
            }
        } else {
            assert!(kind == KIND_VEC);
            let vec_arc = (arc as usize & KIND_UNMASK) as *mut SharedVec;
            let old_size = (*vec_arc).ref_count.fetch_add(1, Relaxed);
            if old_size == usize::MAX {
                abort();
            }
            Storage {
                arc: NonNull::new_unchecked(arc),
                ..*self
            }
        }
    }

    /// Ensures room for at least `additional` more bytes.
    #[inline]
    pub(crate) fn reserve(&mut self, additional: usize) {
        let len = self.len();
        let rem = self.capacity() - len;
        if additional <= rem {
            return;
        }
        self.reserve_inner(additional)
    }

    fn reserve_inner(&mut self, additional: usize) {
        let len = self.len();
        let kind = self.kind();
        if kind == KIND_INLINE {
            let new_cap = len + additional;
            *self = Storage::from_slice_with_capacity(new_cap, self.as_ref());
            return;
        }
        let new_cap = len + additional;
        if kind == KIND_VEC {
            let vec = self.shared_vec();
            unsafe {
                let vec_cap = (*vec).cap - SHARED_VEC_SIZE;
                if vec_cap >= new_cap && (*vec).is_unique() {
                    // Sole owner and the allocation is big enough:
                    // reclaim the space in front of `ptr` by sliding the
                    // bytes back to the start of the payload.
                    let ptr = (vec as *mut u8).add(SHARED_VEC_SIZE);
                    ptr::copy(self.ptr, ptr, len);
                    self.ptr = ptr;
                    self.cap = vec_cap;
                } else {
                    *self = Storage::from_slice_with_capacity(new_cap, self.as_ref());
                }
            }
        } else {
            debug_assert!(kind == KIND_ARC);
            let arc = self.arc.as_ptr();
            unsafe {
                if (*arc).is_unique() {
                    let v = &mut (*arc).vec;
                    if v.capacity() >= new_cap {
                        // Same reclamation trick for the Vec-backed case.
                        let ptr = v.as_mut_ptr();
                        ptr::copy(self.ptr, ptr, len);
                        self.ptr = ptr;
                        self.cap = v.capacity();
                        return;
                    }
                }
                *self = Storage::from_slice_with_capacity(new_cap, self.as_ref());
            }
        }
    }

    #[inline]
    pub(crate) fn is_inline(&self) -> bool {
        self.kind() == KIND_INLINE
    }

    /// Untags `arc` back into the SharedVec header pointer (valid for
    /// KIND_VEC only).
    #[inline]
    fn shared_vec(&self) -> *mut SharedVec {
        ((self.arc.as_ptr() as usize) & KIND_UNMASK) as *mut SharedVec
    }

    /// Decodes the representation tag from the two low bits of the
    /// value stored in `arc`.
    ///
    /// Fix: the former big-endian variant dereferenced the tagged value
    /// (`*(arc as *const usize)`), which reads through an invalid
    /// address whenever `arc` holds a tag rather than a real pointer
    /// (KIND_INLINE / KIND_STATIC) — undefined behavior. The tag lives
    /// in the low bits of the *value* on both endiannesses: the inline
    /// payload only ever overwrites the high bytes of `arc` (bytes 2..8
    /// of the field on little-endian, the leading bytes on big-endian),
    /// so masking the value — exactly what the little-endian path
    /// already did — is correct everywhere and the cfg split is gone.
    #[inline]
    fn kind(&self) -> usize {
        (self.arc.as_ptr() as usize) & KIND_MASK
    }
}
// SAFETY(review): cross-thread shared state is limited to the
// atomically reference-counted Shared/SharedVec allocations, and
// mutation of shared bytes is gated on uniqueness (`is_mut_safe`) —
// presumed sound for the same reasons as `Arc<Vec<u8>>`; confirm that
// all mutating call sites check uniqueness first.
unsafe impl Send for Storage {}
unsafe impl Sync for Storage {}
impl Clone for Storage {
    fn clone(&self) -> Storage {
        // Cheap clone: a bitwise copy or a refcount bump, never a copy
        // of the payload bytes.
        unsafe { self.shallow_clone() }
    }
}
impl Drop for Storage {
    fn drop(&mut self) {
        let kind = self.kind();
        // Inline and static storage own no heap allocation; only the
        // shared representations need a reference-count release.
        if kind == KIND_VEC {
            release_shared_vec(self.shared_vec());
        } else if kind == KIND_ARC {
            release_shared(self.arc.as_ptr());
        }
    }
}
// Decrements the `Shared` refcount, dropping the allocation when this
// was the last reference.
fn release_shared(ptr: *mut Shared) {
    unsafe {
        // Release on the decrement publishes this handle's writes; the
        // Acquire fence below synchronizes with all prior releases
        // before the memory is freed (the std::sync::Arc protocol).
        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
            return;
        }
        atomic::fence(Acquire);
        let _ = Box::from_raw(ptr);
    }
}
impl StorageVec {
    /// Empty buffer with room for at least `capacity` bytes.
    pub(crate) fn with_capacity(capacity: usize) -> StorageVec {
        let shared_ptr = SharedVec::create(capacity, &[]);
        StorageVec(unsafe { NonNull::new_unchecked(shared_ptr) })
    }

    /// Buffer initialized with a copy of `src`.
    pub(crate) fn from_slice(src: &[u8]) -> StorageVec {
        let shared_ptr = SharedVec::create(src.len(), src);
        StorageVec(unsafe { NonNull::new_unchecked(shared_ptr) })
    }

    /// Read-only view of the initialized bytes.
    pub(crate) fn as_ref(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) }
    }

    /// Mutable view of the initialized bytes.
    pub(crate) fn as_mut(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.as_ptr(), self.len()) }
    }

    /// Mutable view over the whole capacity.
    ///
    /// # Safety
    /// Bytes past `len()` may be uninitialized.
    pub(crate) unsafe fn as_raw(&mut self) -> &mut [u8] {
        slice::from_raw_parts_mut(self.as_ptr(), self.capacity())
    }

    // First payload byte: allocation start plus the current offset
    // (initially SHARED_VEC_SIZE; grows via `set_start`).
    unsafe fn as_ptr(&self) -> *mut u8 {
        (self.0.as_ptr() as *mut u8).add((*self.0.as_ptr()).offset as usize)
    }

    unsafe fn as_inner(&mut self) -> &mut SharedVec {
        self.0.as_mut()
    }

    /// Appends one byte; panics when full.
    pub(crate) fn put_u8(&mut self, n: u8) {
        let len = self.len();
        assert!(len < self.capacity());
        unsafe {
            let inner = self.as_inner();
            inner.len += 1;
            *self.as_ptr().add(len) = n;
        }
    }

    pub(crate) fn len(&self) -> usize {
        unsafe { (*self.0.as_ptr()).len as usize }
    }

    // Usable bytes: from the current offset to the end of the
    // allocation (shrinks as `set_start` advances the offset).
    pub(crate) fn capacity(&self) -> usize {
        unsafe { (*self.0.as_ptr()).cap - (*self.0.as_ptr()).offset as usize }
    }

    /// Converts into an immutable `Storage`, inlining small contents.
    pub(crate) fn freeze(self) -> Storage {
        unsafe {
            if self.len() <= INLINE_CAP {
                // `self` is dropped at the end of this call, releasing
                // the allocation after the bytes are copied inline.
                Storage::from_ptr_inline(self.as_ptr(), self.len())
            } else {
                // Re-tag the header pointer as a KIND_VEC Storage and
                // hand our reference count over to it (hence the forget).
                let inner = Storage {
                    ptr: self.as_ptr(),
                    len: self.len(),
                    cap: self.capacity(),
                    arc: NonNull::new_unchecked(
                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
                    ),
                };
                mem::forget(self);
                inner
            }
        }
    }

    /// Temporarily exposes the buffer as a `BytesMut`, runs `f`, then
    /// re-adopts whatever storage the closure left behind.
    pub(crate) fn with_bytes_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut BytesMut) -> R,
    {
        unsafe {
            // Wrap our allocation in a BytesMut without touching the
            // reference count; `buf` takes over this handle's reference
            // for the duration of the call.
            let mut buf = BytesMut {
                inner: Storage {
                    ptr: self.as_ptr(),
                    len: self.len(),
                    cap: self.capacity(),
                    arc: NonNull::new_unchecked(
                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
                    ),
                },
            };
            let result = f(&mut buf);
            let storage = match buf.inner.kind() {
                KIND_VEC => {
                    let ptr = buf.inner.shared_vec();
                    let offset = buf.inner.ptr as usize - ptr as usize;
                    if buf.inner.cap < (*ptr).cap - offset {
                        // `buf` no longer covers the full tail of its
                        // allocation (e.g. it was split): copy out.
                        // Dropping `buf` releases the old reference.
                        StorageVec::from_slice(buf.inner.as_ref())
                    } else {
                        // Same allocation (possibly a new one from a
                        // reallocating reserve): sync the header and
                        // reclaim it, keeping the refcount by forgetting
                        // `buf`.
                        (*ptr).len = buf.len() as u32;
                        (*ptr).offset = offset as u32;
                        mem::forget(buf);
                        StorageVec(NonNull::new_unchecked(ptr))
                    }
                }
                KIND_INLINE | KIND_STATIC | KIND_ARC => {
                    // The closure replaced the storage entirely: copy the
                    // bytes into a fresh SharedVec allocation.
                    StorageVec::from_slice(buf.inner.as_ref())
                }
                _ => panic!(),
            };
            // Our old handle's reference was consumed above (either by
            // dropping `buf` or carried into `storage`), so the previous
            // `self` must not run its destructor.
            let old = mem::replace(self, storage);
            mem::forget(old);
            result
        }
    }

    /// Splits off the first `at` bytes as an immutable `Storage`,
    /// keeping the remainder in `self`.
    pub(crate) fn split_to(&mut self, at: usize, create_inline: bool) -> Storage {
        unsafe {
            let ptr = self.as_ptr();
            let other = if create_inline && at <= INLINE_CAP {
                Storage::from_ptr_inline(ptr, at)
            } else {
                // The returned Storage shares the allocation: bump the
                // reference count (abort on overflow, as in
                // shallow_clone_sync).
                let inner = self.as_inner();
                let old_size = inner.ref_count.fetch_add(1, Relaxed);
                if old_size == usize::MAX {
                    abort();
                }
                Storage {
                    ptr,
                    len: at,
                    cap: at,
                    arc: NonNull::new_unchecked(
                        (self.0.as_ptr() as usize ^ KIND_VEC) as *mut Shared,
                    ),
                }
            };
            self.set_start(at as u32);
            other
        }
    }

    /// Shortens the buffer to `len` bytes. Truncating to zero also
    /// rewinds the payload offset when the allocation is uniquely
    /// owned, reclaiming space consumed by earlier `set_start` calls.
    pub(crate) fn truncate(&mut self, len: usize) {
        unsafe {
            if len == 0 {
                let inner = self.as_inner();
                if inner.is_unique() && inner.offset != SHARED_VEC_SIZE as u32 {
                    inner.offset = SHARED_VEC_SIZE as u32;
                }
            }
            if len < self.len() {
                self.set_len(len);
            }
        }
    }

    /// Grows to `new_len` filling the new bytes with `value`, or
    /// truncates when `new_len` is not larger.
    pub(crate) fn resize(&mut self, new_len: usize, value: u8) {
        let len = self.len();
        if new_len > len {
            let additional = new_len - len;
            self.reserve(additional);
            unsafe {
                let dst = self.as_raw()[len..].as_mut_ptr();
                ptr::write_bytes(dst, value, additional);
                self.set_len(new_len);
            }
        } else {
            self.truncate(new_len);
        }
    }

    /// Ensures room for at least `additional` more bytes.
    #[inline]
    pub(crate) fn reserve(&mut self, additional: usize) {
        if additional <= self.capacity() - self.len() {
            return;
        }
        self.reserve_inner(additional)
    }

    #[inline]
    fn reserve_inner(&mut self, additional: usize) {
        unsafe {
            let inner = self.as_inner();
            let len = inner.len as usize;
            let cap = inner.cap - SHARED_VEC_SIZE;
            let new_cap = len + additional;
            if cap >= new_cap && inner.is_unique() {
                // Sole owner with enough total room: slide the bytes
                // back to the start of the payload to reclaim the
                // offset consumed by earlier `set_start` calls.
                let offset = inner.offset;
                inner.offset = SHARED_VEC_SIZE as u32;
                if len != 0 {
                    let ptr = self.0.as_ptr() as *mut u8;
                    ptr::copy(ptr.add(offset as usize), ptr.add(SHARED_VEC_SIZE), len);
                }
            } else {
                // Otherwise allocate a bigger buffer and copy.
                *self = StorageVec(NonNull::new_unchecked(SharedVec::create(
                    new_cap,
                    self.as_ref(),
                )));
            }
        }
    }

    /// Sets the initialized length.
    ///
    /// # Safety
    /// All bytes in `..len` must be initialized.
    #[inline]
    pub(crate) unsafe fn set_len(&mut self, len: usize) {
        assert!(len <= self.capacity());
        self.0.as_mut().len = len as u32;
    }

    /// Advances the payload offset by `start` bytes, shrinking `len`
    /// accordingly (saturating at zero).
    ///
    /// # Safety
    /// `start` must be within the current capacity (also asserted).
    pub(crate) unsafe fn set_start(&mut self, start: u32) {
        if start != 0 {
            let cap = self.capacity();
            let inner = self.as_inner();
            assert!(
                start <= cap as u32,
                "Cannot set start position cap:{} offset:{} len:{} acap:{}",
                inner.cap,
                inner.offset,
                inner.len,
                cap
            );
            inner.offset += start;
            if inner.len > start {
                inner.len -= start;
            } else {
                inner.len = 0;
            }
        }
    }
}
impl Drop for StorageVec {
    fn drop(&mut self) {
        // Release our reference; frees the allocation if it was the last.
        release_shared_vec(self.0.as_ptr());
    }
}
impl Shared {
    // True when this handle holds the only reference. Acquire pairs
    // with the Release decrement in `release_shared`, so an observed
    // count of 1 also synchronizes with all released handles' writes.
    fn is_unique(&self) -> bool {
        self.ref_count.load(Acquire) == 1
    }
}
impl SharedVec {
    // Allocates a combined header+payload buffer holding at least `cap`
    // payload bytes and copies `src` into the payload.
    //
    // The allocation is obtained as a Vec<SharedVec> so it is correctly
    // sized and aligned for the header: element 0 becomes the header,
    // the remaining elements provide the payload bytes. Ownership of
    // the allocation passes to the returned raw pointer and is
    // reclaimed in `release_shared_vec`.
    fn create(cap: usize, src: &[u8]) -> *mut SharedVec {
        // Element count: ceil(cap / SHARED_VEC_SIZE) payload elements
        // plus one for the header.
        let vec_cap = if cap % SHARED_VEC_SIZE != 0 {
            (cap / SHARED_VEC_SIZE) + 2
        } else {
            (cap / SHARED_VEC_SIZE) + 1
        };
        // Never allocate fewer than 3 elements (header + 2 payload).
        let mut vec: Vec<SharedVec> = Vec::with_capacity(cmp::max(vec_cap, 3));
        // Record the *actual* capacity in bytes — Vec may over-allocate.
        let cap = vec.capacity() * SHARED_VEC_SIZE;
        let shared_ptr = vec.as_mut_ptr();
        mem::forget(vec);
        unsafe {
            ptr::write(
                shared_ptr,
                SharedVec {
                    cap,
                    len: src.len() as u32,
                    offset: SHARED_VEC_SIZE as u32,
                    ref_count: AtomicUsize::new(1),
                },
            );
            if !src.is_empty() {
                // Payload starts right after the header element.
                let ptr = shared_ptr.add(1) as *mut u8;
                ptr::copy_nonoverlapping(src.as_ptr(), ptr, src.len());
            }
        }
        shared_ptr
    }

    // True when this is the only reference (see Shared::is_unique for
    // the ordering rationale).
    fn is_unique(&self) -> bool {
        self.ref_count.load(Acquire) == 1
    }
}
fn release_shared_vec(ptr: *mut SharedVec) {
unsafe {
if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
return;
}
atomic::fence(Acquire);
let cap = (*ptr).cap;
ptr::drop_in_place(ptr);
Vec::<u8>::from_raw_parts(ptr as *mut u8, 0, cap);
}
}
// Guard whose Drop panics: used by `abort` below to escalate a panic
// into a process abort.
struct Abort;
impl Drop for Abort {
    fn drop(&mut self) {
        panic!();
    }
}
// Aborts the process. The `panic!` starts unwinding, `_a`'s Drop panics
// again during that unwind, and panicking-while-panicking aborts. (With
// panic=abort the first panic aborts directly.) Used on refcount
// overflow, where continuing could lead to use-after-free.
#[inline(never)]
#[cold]
fn abort() {
    let _a = Abort;
    panic!();
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::*;

    // 263 bytes — long enough to force heap-backed storage. (The
    // backslash-newline escapes splice the literal together; the
    // indentation after each one is stripped by the escape.)
    const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb \
mary had a little lamb, little lamb, little lamb, little lamb, little lamb, little lamb";

    // Exercises Storage::trimdown: reallocate when >= 64 bytes are
    // wasted, inline once the contents fit INLINE_CAP. The exact
    // capacity values assume a 64-bit target.
    #[test]
    fn trimdown() {
        let mut b = Bytes::from(LONG.to_vec());
        assert_eq!(b.inner.capacity(), 263);
        unsafe { b.inner.set_len(68) };
        assert_eq!(b.len(), 68);
        assert_eq!(&b[..], &LONG[..68]);
        // set_len alone must not shrink the allocation.
        assert_eq!(b.inner.capacity(), 263);
        b.trimdown();
        assert_eq!(&b[..], &LONG[..68]);
        // 263 - 68 >= 64 wasted bytes, so trimdown reallocated.
        assert_eq!(b.inner.capacity(), 72);
        unsafe { b.inner.set_len(16) };
        assert_eq!(&b[..], &LONG[..16]);
        // 16 <= INLINE_CAP, so trimdown collapses to inline storage.
        b.trimdown();
        assert!(b.is_inline());
    }

    // End-to-end BytesVec behavior: capacity rounding (whole SharedVec
    // elements), conversions to Bytes/BytesMut, and offset reclamation
    // by reserve after advance.
    #[test]
    #[allow(clippy::unnecessary_fallible_conversions)]
    fn bytes_vec() {
        let bv = BytesVec::copy_from_slice(LONG);
        // Capacity is rounded up to whole SharedVec-sized elements.
        assert_eq!(bv.capacity(), 264);
        assert_eq!(bv.len(), 263);
        assert_eq!(bv.as_ref().len(), 263);
        assert_eq!(bv.as_ref(), LONG);
        assert_eq!(&bv[..], LONG);

        let mut bv = BytesVec::copy_from_slice(&b"hello"[..]);
        // Minimum allocation: header + 2 payload elements.
        assert_eq!(bv.capacity(), mem::size_of::<SharedVec>() * 2);
        assert_eq!(bv.len(), 5);
        assert_eq!(bv.as_ref().len(), 5);
        assert_eq!(bv.as_ref()[0], b"h"[0]);
        bv.put_u8(b" "[0]);
        assert_eq!(bv.as_ref(), &b"hello "[..]);
        bv.put("world");
        assert_eq!(bv, "hello world");
        let b = Bytes::from(bv);
        assert_eq!(b, "hello world");
        let mut b = BytesMut::try_from(b).unwrap();
        b.put(".");
        assert_eq!(b, "hello world.");

        let mut bv = BytesVec::new();
        bv.extend_from_slice(b"hello world.");
        bv.extend_from_slice(b"hello world.");
        bv.extend_from_slice(b"hello world.");
        bv.extend_from_slice(b"hello world.");
        let p1 = unsafe { bv.inner.as_ptr() as usize };
        // Consume everything: offset advances, capacity drops to zero.
        bv.advance(48);
        assert!(bv.is_empty());
        assert!(bv.capacity() == 0);
        // reserve on a unique buffer rewinds the offset instead of
        // reallocating...
        bv.reserve(48);
        assert!(bv.is_empty());
        assert!(bv.capacity() == 48);
        let p2 = unsafe { bv.inner.as_ptr() as usize };
        // ...so the payload pointer is unchanged.
        assert!(p1 == p2);
    }
}